{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Imports: project helpers, OpenAI-compatible client, and the Swarm agent framework ---\n",
    "from func.search.search_core import SearchCore\n",
    "from func.gobal.data import LLmData\n",
    "from openai import OpenAI\n",
    "from swarm import Swarm, Agent\n",
    "from func.writer.writer_func import *\n",
    "from src.help_function import *\n",
    "from func.log.default_log import DefaultLog\n",
    "from func.writer.literature_review import LiteratureReview\n",
    "\n",
    "# LLM connection settings (api_key / base_url) -- presumably loaded from project config; verify in func.gobal.data\n",
    "llmData= LLmData()\n",
    "# Module-level logger shared by all cells below\n",
    "log = DefaultLog().getLogger()\n",
    "# OpenAI-compatible client pointed at the configured base_url, wrapped by Swarm\n",
    "glm_client = OpenAI(api_key=llmData.api_key, base_url=llmData.base_url)\n",
    "client = Swarm(client=glm_client)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_title_with_abstract(datalist, abstractlist, top_k=10):\n",
    "    \"\"\"Join paper titles with their abstracts.\n",
    "\n",
    "    Args:\n",
    "        datalist: mapping whose values are dicts carrying 'paper_id' and 'paper_title'.\n",
    "        abstractlist: mapping of paper_id -> abstract text.\n",
    "        top_k: maximum number of records to return.\n",
    "\n",
    "    Returns:\n",
    "        List of at most top_k dicts of the form {\"title\": ..., \"abstract\": ...}.\n",
    "    \"\"\"\n",
    "    # Build a paper_id -> title lookup from the search results.\n",
    "    paper_id_to_title = {\n",
    "        value['paper_id']: value['paper_title'] for value in datalist.values()\n",
    "    }\n",
    "\n",
    "    # Match each abstract to its title. Skip abstracts whose paper_id is\n",
    "    # missing from datalist instead of raising KeyError (abstract extraction\n",
    "    # can surface paper_ids that were not in the original title list).\n",
    "    title_with_abstract = []\n",
    "    for paper_id, abstract in abstractlist.items():\n",
    "        if paper_id not in paper_id_to_title:\n",
    "            continue\n",
    "        title_with_abstract.append({\"title\": paper_id_to_title[paper_id], \"abstract\": abstract})\n",
    "        if len(title_with_abstract) >= top_k:\n",
    "            break  # stop early -- only top_k records are needed\n",
    "    return title_with_abstract\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_agent(topic=None, debug=False, k=10):\n",
    "    \"\"\"Generate a literature review for ``topic`` and save it as markdown.\n",
    "\n",
    "    Args:\n",
    "        topic: literature-review topic string.\n",
    "        debug: TODO(review): currently unused -- wire into downstream calls or drop.\n",
    "        k: number of title/abstract pairs fed into structure generation.\n",
    "\n",
    "    Returns:\n",
    "        The generated literature review text (``lit_review.lit_review``).\n",
    "    \"\"\"\n",
    "    searchCore = SearchCore()\n",
    "    writer = LiteratureReviewWriter()\n",
    "    lit_review = LiteratureReview(topic)\n",
    "\n",
    "    # Step 1: classify the topic type\n",
    "    log.info(f\"=========主题类型判断===========\")\n",
    "    topic_type = writer.topic_classification(topic)\n",
    "    log.info(f\"主题: {topic} 类型: {topic_type}\")\n",
    "    # Step 2: retrieve literature data for the topic\n",
    "    log.info(f\"=========文献数据获取===========\")\n",
    "    datalist = searchCore.search_konwledge(topic)\n",
    "    log.info(\"文献数据：\" + str(len(datalist)))\n",
    "    # Step 3: extract abstracts from the retrieved papers\n",
    "    log.info(f\"=========文献摘要提取===========\")\n",
    "    abstractlist = searchCore.abstract_extract(datalist)\n",
    "    log.info(\"摘要数据：\" + str(len(abstractlist)))\n",
    "    # Step 4: summarize and distill key knowledge points\n",
    "    log.info(f\"=========知识点总结提炼===========\")\n",
    "    # Bug fix: honour the caller-supplied k instead of a hard-coded 10.\n",
    "    title_with_abstract = get_title_with_abstract(datalist, abstractlist, top_k=k)\n",
    "    log.info(\"提炼标题与摘要：\" + str(len(title_with_abstract)))\n",
    "    log.info(f\"=========文献综述结构生成===========\")\n",
    "    structure = writer.get_structure(topic, topic_type, data=title_with_abstract)\n",
    "    lit_review.set_structure(structure)\n",
    "    structure_md = lit_review.structure_to_md(structure)\n",
    "    numbered_structure = lit_review.add_numbering_to_structure(structure)\n",
    "    lit_review.set_structure(numbered_structure)\n",
    "    # NOTE(review): the outline list is built from the *un-numbered* structure\n",
    "    # even though the numbered one was just set -- confirm this is intentional.\n",
    "    structure_list = lit_review.structure_to_list(structure)  # outline list (strings)\n",
    "    log.info(f\"文献综述结构: {structure_md}\")\n",
    "    # Step 5: draft each subsection of the review from the outline\n",
    "    log.info(f\"=========文献综述初稿生成===========\")\n",
    "    sub_section_content = writer.write_subsections_content_parallel(structure_list, structure_md)\n",
    "    lit_review.set_content(sub_section_content)\n",
    "    lit_review.content_to_LitReview()\n",
    "    # Step 6: persist the draft to local markdown\n",
    "    log.info(f\"=========文献综述初稿保存===========\")\n",
    "    lit_review.LitReview_to_md(\"lite_review/\")\n",
    "\n",
    "    return lit_review.lit_review"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    # Entry point: generate and print a review for the demo topic.\n",
    "    topic = \"The Current State of Research on Chain-of-Thought Prompting Methods\"\n",
    "    review_text = run_agent(topic=topic, debug=True, k=10)\n",
    "    print(review_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Manual, step-by-step run of the run_agent pipeline (the cells below mirror its steps).\n",
    "topic = \"The Current State of Research on Chain-of-Thought Prompting Methods\"\n",
    "searchCore = SearchCore()\n",
    "writer = LiteratureReviewWriter()\n",
    "lit_review = LiteratureReview(topic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:19:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m2\u001B[0m - \u001B[37m=========主题类型判断===========\u001B[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:19:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m4\u001B[0m - \u001B[37m主题: The Current State of Research on Chain-of-Thought Prompting Methods 类型: {type: 对一个方向研究现状的综述}\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "# Step 1: classify the topic type (mirrors step 1 inside run_agent)\n",
    "log.info(f\"=========主题类型判断===========\")\n",
    "topic_type = writer.topic_classification(topic)\n",
    "log.info(f\"主题: {topic} 类型: {topic_type}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:19:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m2\u001B[0m - \u001B[37m=========文献数据获取===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m173\u001B[0m - \u001B[37mquestion:The Current State of Research on Chain-of-Thought Prompting Methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m175\u001B[0m - \u001B[37m=========问题关键字抽取===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mquestion_key_extract\u001B[0m:\u001B[36m47\u001B[0m - \u001B[37m\n",
      "        Please extract the keywords from the user's question:\"The Current State of Research on Chain-of-Thought Prompting Methods\".\n",
      "        output a keyword array in order from important to unimportant:{\"keyword_list\":[\"keyword1\",\"keyword2\",\"keyword...n\"]}.\n",
      "        Note that there should be no duplication between the extracted keywords.\n",
      "        \u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m183\u001B[0m - \u001B[37m主要关键字:Chain-of-Thought\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m184\u001B[0m - \u001B[37m{'keyword_list': ['Prompting', 'Methods', 'Research', 'Current', 'State']}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m186\u001B[0m - \u001B[37m{'keyword_list': ['Prompting', 'Methods', 'Research', 'Current', 'State']}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m189\u001B[0m - \u001B[37m=========转换近义词===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msynonyms_extract\u001B[0m:\u001B[36m72\u001B[0m - \u001B[37m\n",
      "        Please list the homophones and synonyms of \"Chain-of-Thought\".\n",
      "        The output format is an array:{\"keyword_list\":[\"keyword1\",\"keyword2\",\"keyword...n\"]}\n",
      "        \u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:25\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m193\u001B[0m - \u001B[37m{'keyword_list': ['Chain-of-Thinking', 'Thought-Chain', 'Reasoning-Chain', 'Line-of-Thought', 'Thought-Process', 'Cognitive-Chain', 'Sequential-Thinking', 'Step-by-Step-Thought', 'Rational-Sequence', 'Thought-Trail', 'Chain-of-Thought']}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:25\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m196\u001B[0m - \u001B[37m=========检索标题[模糊匹配]===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thinking，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Thought-Chain，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:31\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Reasoning-Chain，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:32\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Line-of-Thought，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:34\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Thought-Process，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:35\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Cognitive-Chain，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:36\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Sequential-Thinking，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:38\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Step-by-Step-Thought，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:39\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Rational-Sequence，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Thought-Trail，搜索结果:0\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m201\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought，搜索结果:22\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m206\u001B[0m - \u001B[37m['6642c61c01d2a3fbfc2544a0', '646d8642d68f896efa0a2c44', '646d8642d68f896efa0a3040', '6528a864939a5f408257a07b', '65309159939a5f4082843df1', '653728be939a5f408235ebfb', '642b955a90e50fcafd82aa1f', '64659ad1d68f896efa8751af', '65b0774d939a5f4082b09741', '6459ac58d68f896efa657f9b', '657bbddc939a5f4082f1cd34', '65b078c6939a5f4082b301d6', '65fcead513fb2c6cf6927310', '63a2794890e50fcafd293fe0', '63a2794d90e50fcafd2945e6', '64563889d68f896efacf5782', '6459ac63d68f896efa658aaf', '646aeca9d68f896efa05a572', '646d863bd68f896efa09ed17', '649a5e2ad68f896efad8463f', '633f98d790e50fcafd78e268', '65cad455939a5f4082f2daa0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m207\u001B[0m - \u001B[37m搜索标题id: 22\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:45\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:6642c61c01d2a3fbfc2544a0，搜索结果:8\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:47\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:646d8642d68f896efa0a2c44，搜索结果:12\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:49\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:646d8642d68f896efa0a3040，搜索结果:8\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:50\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:6528a864939a5f408257a07b，搜索结果:7\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:52\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:65309159939a5f4082843df1，搜索结果:6\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:54\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:653728be939a5f408235ebfb，搜索结果:5\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:56\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:642b955a90e50fcafd82aa1f，搜索结果:9\u001B[0m\n",
      "\u001B[32m2025-02-04 23:19:58\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:64659ad1d68f896efa8751af，搜索结果:4\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:00\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:65b0774d939a5f4082b09741，搜索结果:13\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:6459ac58d68f896efa657f9b，搜索结果:13\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:04\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:657bbddc939a5f4082f1cd34，搜索结果:10\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:05\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:65b078c6939a5f4082b301d6，搜索结果:7\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:65fcead513fb2c6cf6927310，搜索结果:5\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:10\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:63a2794890e50fcafd293fe0，搜索结果:13\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:13\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:63a2794d90e50fcafd2945e6，搜索结果:9\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:14\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:64563889d68f896efacf5782，搜索结果:5\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:15\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:6459ac63d68f896efa658aaf，搜索结果:12\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:18\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:646aeca9d68f896efa05a572，搜索结果:4\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:20\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:646d863bd68f896efa09ed17，搜索结果:13\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:649a5e2ad68f896efad8463f，搜索结果:5\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:633f98d790e50fcafd78e268，搜索结果:6\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m211\u001B[0m - \u001B[37mpaper_id:65cad455939a5f4082f2daa0，搜索结果:10\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m218\u001B[0m - \u001B[37m搜索标题索引: 22\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m219\u001B[0m - \u001B[37m搜索标题: 184\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m222\u001B[0m - \u001B[37m=========检索论文片段[向量搜索]===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m226\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought Prompting，搜索结果:30\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m226\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought Methods，搜索结果:30\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:35\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m226\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought Research，搜索结果:30\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:38\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m226\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought Current，搜索结果:30\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m226\u001B[0m - \u001B[37m搜索关键字:Chain-of-Thought State，搜索结果:30\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36msearch_konwledge\u001B[0m:\u001B[36m237\u001B[0m - \u001B[37m最终搜索文献: 232\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m4\u001B[0m - \u001B[37m文献数据：232\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "# 第二步根据文献主题获取文献数据\n",
    "log.info(f\"=========文献数据获取===========\")\n",
    "datalist = searchCore.search_konwledge(topic)\n",
    "log.info(\"文献数据：\" + str(len(datalist)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m2\u001B[0m - \u001B[37m=========文献摘要提取===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6642c61c01d2a3fbfc2544a0,paper_title:LLMs Can Find Mathematical Reasoning Mistakes by Pedagogical Chain-of-Thought\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646d8642d68f896efa0a2c44,paper_title:The CoT Collection: Improving Zero-shot and Few-shot Learning of Language Models Via Chain-of-Thought Fine-Tuning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646d8642d68f896efa0a3040,paper_title:Exploring Chain-of-Thought Style Prompting for Text-to-SQL\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6528a864939a5f408257a07b,paper_title:Prompting Large Language Models with Chain-of-Thought for Few-Shot Knowledge Base Question Generation\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65309159939a5f4082843df1,paper_title:Chain-of-Thought Tuning: Masked Language Models Can Also Think Step by Step in Natural Language Understanding\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:41\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:653728be939a5f408235ebfb,paper_title:CoF-CoT: Enhancing Large Language Models with Coarse-to-Fine Chain-of-Thought Prompting for Multi-domain NLU Tasks\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:642b955a90e50fcafd82aa1f,paper_title:Chain-of-Thought Predictive Control\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64659ad1d68f896efa8751af,paper_title:Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65b0774d939a5f4082b09741,paper_title:BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6459ac58d68f896efa657f9b,paper_title:T-SciQ: Teaching Multimodal Chain-of-Thought Reasoning Via Large Language Model Signals for Science Question Answering.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:657bbddc939a5f4082f1cd34,paper_title:Multi-modal Latent Space Learning for Chain-of-Thought Reasoning in Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65b078c6939a5f4082b301d6,paper_title:KAM-CoT: Knowledge Augmented Multimodal Chain-of-Thoughts Reasoning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65fcead513fb2c6cf6927310,paper_title:A Chain-of-Thought Prompting Approach with LLMs for Evaluating Students' Formative Assessment Responses in Science\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:63a2794890e50fcafd293fe0,paper_title:Towards Understanding Chain-of-Thought Prompting: an Empirical Study of What Matters.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:63a2794d90e50fcafd2945e6,paper_title:Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64563889d68f896efacf5782,paper_title:SCOTT: Self-Consistent Chain-of-Thought Distillation\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6459ac63d68f896efa658aaf,paper_title:Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning   by Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646aeca9d68f896efa05a572,paper_title:Reasoning Implicit Sentiment with Chain-of-Thought Prompting.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646d863bd68f896efa09ed17,paper_title:Element-aware Summarization with Large Language Models: Expert-aligned Evaluation and Chain-of-Thought Method.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:649a5e2ad68f896efad8463f,paper_title:Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:633f98d790e50fcafd78e268,paper_title:Language Models are Multilingual Chain-of-Thought Reasoners\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65cad455939a5f4082f2daa0,paper_title:Generating Chain-of-Thoughts with a Pairwise-Comparison Approach to Searching for the Most Promising Intermediate Thought\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64e432bf3fda6d7f0600b0dd,paper_title:Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:42\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:636482d890e50fcafdccb12b,paper_title:Inverse Scaling Can Become U-shaped\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:63f6da1090e50fcafd25707b,paper_title:Guiding Large Language Models Via Directional Stimulus Prompting.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:44\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646edc9cd68f896efaddab9d,paper_title:MQuAKE: Assessing Knowledge Editing in Language Models Via Multi-Hop Questions.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:45\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6232a74d5aee126c0fe13e4a,paper_title:Iteratively Prompt Pre-trained Language Models for Chain of Thought\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:45\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:651a282d3fda6d7f0600a09e,paper_title:Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:48\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:652378c7939a5f4082e153d9,paper_title:Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:49\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6579183b939a5f4082d9f8df,paper_title:Get an A in Math: Progressive Rectification Prompting\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:49\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65252a62939a5f40826a5d04,paper_title:DialCoT Meets PPO: Decomposing and Exploring Reasoning Paths in Smaller Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:51\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65d4162a939a5f4082e148c4,paper_title:Boosting of Thoughts: Trial-and-Error Problem Solving with Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:52\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:659cf483939a5f4082bca2f6,paper_title:Advancing Spatial Reasoning in Large Language Models: an In-Depth Evaluation and Enhancement Using the StepGame Benchmark\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:55\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6514e2043fda6d7f062dc9a9,paper_title:Graph Neural Prompting with Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:57\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65f8f1a913fb2c6cf6673ac2,paper_title:A Comprehensive Study of Multimodal Large Language Models for Image Quality Assessment\u001B[0m\n",
      "\u001B[32m2025-02-04 23:20:58\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:633ba44890e50fcafdfe4f9e,paper_title:Complexity-Based Prompting for Multi-Step Reasoning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:00\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6563ff3f939a5f4082214220,paper_title:Boosting the Power of Small Multimodal Reasoning Models to Match Larger Models with Self-Consistency Training\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:01\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:656e8ec6939a5f4082874a70,paper_title:Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:652379bb939a5f4082e1b911,paper_title:Amortizing Intractable Inference in Large Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:04\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65026d513fda6d7f06474c6c,paper_title:Auto-Regressive Next-Token Predictors Are Universal Learners\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65efb75313fb2c6cf63287da,paper_title:TRAD: Enhancing LLM Agents with Step-Wise Thought Retrieval and Aligned Decision\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64796914d68f896efa134af5,paper_title:Interpretable Math Word Problem Solution Generation Via Step-by-step   Planning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:659e2323939a5f40828a33de,paper_title:Chain-of-Table: Evolving Tables in the Reasoning Chain for Table Understanding\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6583b18a939a5f408229ae3a,paper_title:Turning Dust into Gold: Distilling Complex Reasoning Capabilities from LLMs by Leveraging Negative Data\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:09\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:653728be939a5f408235ec00,paper_title:Plan, Verify and Switch: Integrated Reasoning with Diverse X-of-Thoughts\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:10\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6448967c71ac66d2cbd8815d,paper_title:Answering Questions by Meta-Reasoning over Multiple Chains of Thought.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:10\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:62393e845aee126c0f1265e1,paper_title:Self-Consistency Improves Chain of Thought Reasoning in Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:10\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:646edc9cd68f896efaddac1d,paper_title:Chain-of-Questions Training with Latent Answers for Robust Multistep Question Answering.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:11\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:664aaea501d2a3fbfc78a686,paper_title:ECR-Chain: Advancing Generative Language Models to Better Emotion-Cause Reasoners Through Reasoning Chains\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:13\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64ab82833fda6d7f06f77dee,paper_title:Teaching Arithmetic to Small Transformers\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:14\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:63a2794d90e50fcafd294603,paper_title:A Survey of Deep Learning for Mathematical Reasoning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:15\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:65add4c1939a5f40821f2b70,paper_title:Escape Sky-high Cost: Early-stopping Self-Consistency for Multi-step Reasoning\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:15\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:657281cc939a5f408226a69d,paper_title:Chain of Code: Reasoning with a Language Model-Augmented Code Emulator\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:17\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6346305e90e50fcafda0792f,paper_title:GENIE: Higher-Order Denoising Diffusion Solvers\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:620a1d7ed18a2b509616a9d6,paper_title:R5: Rule Discovery with Reinforced and Recurrent Relational Reasoning.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:22\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64796919d68f896efa134e04,paper_title:Minding Language Models' (lack Of) Theory of Mind: A Plug-and-Play Multi-Character Belief Tracker.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:23\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6456389bd68f896efacf6b5b,paper_title:Faithful Question Answering with Monte-Carlo Planning.\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6552dfec939a5f40823b2603,paper_title:ViLMA: A Zero-Shot Benchmark for Linguistic and Temporal Grounding in Video-Language Models\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:6348d36b90e50fcafd546605,paper_title:A Direct Approximation of AIXI Using Logical State Abstractions\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:27\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64dc49903fda6d7f06389d06,paper_title:Learning to Identify Critical States for Reinforcement Learning from Videos\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.search.search_core\u001B[0m:\u001B[36mabstract_extract\u001B[0m:\u001B[36m153\u001B[0m - \u001B[37m处理摘要paper_id:64ffca703fda6d7f06cd8972,paper_title:Ansatz-Agnostic Exponential Resource Saving in Variational Quantum Algorithms Using Shallow Shadows\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m4\u001B[0m - \u001B[37m摘要数据：61\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "# Step 3: extract abstracts for the papers found in the previous search step.\n",
    "# `datalist` (paper metadata keyed by result index) comes from the earlier search cell.\n",
    "log.info(f\"=========文献摘要提取===========\")\n",
    "# abstractlist maps paper_id -> extracted abstract text (per the log lines above) -- TODO confirm against SearchCore.abstract_extract\n",
    "abstractlist = searchCore.abstract_extract(datalist)\n",
    "# Log the number of abstracts as a quick sanity check (61 in the recorded run).\n",
    "log.info(\"摘要数据：\" + str(len(abstractlist)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:21:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m2\u001B[0m - \u001B[37m=========知识点总结提炼===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m4\u001B[0m - \u001B[37m提炼标题与摘要：10\u001B[0m\n",
      "\u001B[32m2025-02-04 23:21:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m5\u001B[0m - \u001B[37m=========文献综述结构生成===========\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:00\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m10\u001B[0m - \u001B[37m文献综述结构: # 思维链提示方法的当前研究现状\n",
      "\n",
      "## 引言\n",
      "### 研究背景与意义\n",
      "### 思维链提示方法的概述\n",
      "### 文章结构安排\n",
      "\n",
      "## 思维链提示方法的基本原理\n",
      "### 思维链提示的定义与工作原理\n",
      "### 思维链提示的设计原则\n",
      "### 思维链提示的应用场景\n",
      "\n",
      "## 思维链提示方法的最新进展\n",
      "### 基于教育理论的思维链提示策略\n",
      "### 思维链微调方法\n",
      "### 思维链提示在文本到SQL解析中的应用\n",
      "### 思维链提示在知识库问题生成中的应用\n",
      "### 思维链提示在自然语言理解任务中的应用\n",
      "### 思维链提示在多领域自然语言理解任务中的应用\n",
      "### 思维链提示在复杂低级控制任务中的应用\n",
      "### 自动思维链提示推理方法\n",
      "### 思维链提示的后门攻击方法\n",
      "### 基于大型语言模型信号的多模态思维链推理教学方法\n",
      "\n",
      "## 思维链提示方法的挑战与未来方向\n",
      "### 思维链提示方法的局限性\n",
      "### 思维链提示方法的改进策略\n",
      "### 思维链提示方法的未来研究方向\n",
      "\n",
      "## 结论\n",
      "### 思维链提示方法的研究总结\n",
      "### 思维链提示方法的潜在影响\n",
      "### 对思维链提示方法未来研究的展望\n",
      "\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "# Step 4: pair paper titles with their abstracts, then generate the literature-review outline.\n",
    "log.info(f\"=========知识点总结提炼===========\")\n",
    "# Keep only the top_k (title, abstract) pairs; see get_title_with_abstract defined above.\n",
    "title_with_abstract = get_title_with_abstract(datalist, abstractlist, top_k=10)\n",
    "log.info(\"提炼标题与摘要：\" + str(len(title_with_abstract)))\n",
    "log.info(f\"=========文献综述结构生成===========\")\n",
    "# Ask the writer agent for a review structure conditioned on topic, topic type, and the sampled abstracts.\n",
    "structure = writer.get_structure(topic, topic_type, data=title_with_abstract)\n",
    "lit_review.set_structure(structure)\n",
    "structure_md = lit_review.structure_to_md(structure) # full outline rendered as Markdown\n",
    "structure_list = lit_review.structure_to_list(structure) # outline flattened to a list of strings\n",
    "log.info(f\"文献综述结构: {structure_md}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m2025-02-04 23:22:00\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36m__main__\u001B[0m:\u001B[36m<module>\u001B[0m:\u001B[36m2\u001B[0m - \u001B[37m=========文献综述初稿生成===========\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "Writing subsections:   0%|          | 0/22 [00:00<?, ?it/s]c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting method: Basic principles of thought chain prompting method - Definition and working principle of thought chain prompting\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Introduction - Research background and significance\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Introduction - Article structure arrangement\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thinking chain prompting methods: Basic principles of thinking chain prompting methods - Design principles of thinking chain prompting\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: The current research status of thought chain prompting methods: the latest progress of thought chain prompting methods - thought chain fine-tuning methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting method: Latest progress of thought chain prompting method - Application of thought chain prompting in knowledge base problem generation\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Latest progress of thought chain prompting methods - Application of thought chain prompting in natural language understanding tasks\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:02\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Conclusion - Summary of research on thought chain prompting methods\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Challenges and future directions of thought chain prompting methods - Future research directions of thought chain prompting methods\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: The current research status of thought chain prompting methods: the latest progress of thought chain prompting methods - backdoor attack methods for thought chain prompting\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting method: Latest progress of thought chain prompting method - Application of thought chain prompting in complex low-level control tasks\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Latest progress of thought chain prompting methods - Application of thought chain prompting in multi domain natural language understanding tasks\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Conclusion - Potential impact of thought chain prompting methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: The current research status of thinking chain prompting methods: the latest progress of thinking chain prompting methods - thinking chain prompting strategies based on educational theory\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting method: Basic principles of thought chain prompting method - Application scenarios of thought chain prompting\u001B[0m\n",
      "c:\\Users\\admin\\AppData\\Local\\anaconda3\\envs\\LLM_research\\lib\\site-packages\\urllib3\\connectionpool.py:1097: InsecureRequestWarning: Unverified HTTPS request is being made to host '127.0.0.1'. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#tls-warnings\n",
      "  warnings.warn(\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thinking chain prompting methods: challenges and future directions of thinking chain prompting methods - improvement strategies for thinking chain prompting methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thinking chain prompting methods: Challenges and future directions of thinking chain prompting methods - Limitations of thinking chain prompting methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Conclusion - Prospects for future research on thought chain prompting methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting method: Latest progress of thought chain prompting method - Application of thought chain prompting in text to SQL parsing\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:03\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: The current research status of thought chain prompting methods: the latest progress of thought chain prompting methods - automatic thought chain prompting reasoning methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:04\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: The current research status of thinking chain prompting methods: the latest progress of thinking chain prompting methods - a multimodal thinking chain reasoning teaching method based on large language model signals\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:04\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m109\u001B[0m - \u001B[37m查询问题: Current research status of thought chain prompting methods: Introduction - Overview of thought chain prompting methods\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:04\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. 
(2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. 
With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. 
For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[0] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 1: {'id': 1, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[1] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 2: {'id': 2, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. He gets two more each from his mom and dad. 
How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  \\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  \\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. 
,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  \\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). 
Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. 
How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[2] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 3: {'id': 3, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}, 4: {'id': 4, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). 
Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). 
Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  \\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. 
This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. 
(2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[4] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 5 Related Work\\nMemorization and Reasoning in PLMs. With the recent success of large-scale pre-trained language models (PLMs), there has been growing interest in investigating what is captured by these PLMs during pre-training ( Talmor et al. ,2020a ;Rogers et al. ,2020 ;Kassner et al. ,2020 ). Studies have shown that in addition to learning linguistic knowledge about language use, PLMs are capable of memorizing a great amount of world knowledge ( Rogers et al. ,2020 ), yielding competitive performance on knowledge probing ( Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ) and other knowledge-intensive tasks such as question answering ( Roberts et al. ,2020 ) and fact checking (Lee et al. ,2020 ), without resorting to any external knowledge source. On the other hand, other work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in recalling their stored knowledge for multi-step inferences, such as answering complex, multi-hop  \\n\\nFor existing work on learning-based prompting, (Shin et al. 
,2020 ) proposes to use gradient-guided search to find appropriate discrete prompt tokens in the PLM’s vocabulary to form prompt templates. While the resulting prompts are readable, most of them have very low fluency and interpretability. (Zhong et al. ,2021 ;Qin and Eisner ,2021 ) propose to optimize the prompts in continuous space instead, which shows large benefits in both effectiveness and optimization efficiency. ( Zhong et al. ,2021 ) also raises and studies the question about whether learning-based prompting could exploit spurious dataset regularities which would weaken the validity of standard evaluation results, a concern we seriously address in our work. ( Lester et al. ,2021 ;Li and Liang ,2021 ) follow the continuous prompting paradigm, and tune task-level prompts for lightweight adaptation of PLMs. Overall, existing prompt learning methods are either restricted to cases where there exists a single & identifiable relation/predicate within the query ( Zhong et al. ,2021 ;Qin and Eisner ,2021 ;Shin et al. ,2020 ), or being static and not sensitive to sample-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ).  \\n\\nIterative Knowledge Retrieval. We are also inspired by methods that iteratively retrieve knowledge from explicit knowledge sources for multistep reasoning, such as ( Xiong et al. ,2021 ;Qi et al. ,2019 ). Our problem setting could be viewed as iterative retrieval over implicit knowledge in PLMs, instead of from explicit knowledge sources.\\n\\n# 6 Conclusion & Future Work\\nWe explore an iterative prompting framework towards driving a “train of thought” from PLMs for multi-step reasoning tasks. We show the superiority of this iterative scheme, and also effectiveness of our proposed context-aware prompter design, which addresses key limitations of previous prompting methods when applied in this new scheme. In addition, we conduct both quantitative & qualitative analysis on the faithfulness of the learned prompting behaviors. 
In the future, we aim to further extend and apply our ideas to Language Model pretraining, with the hope that PLMs can be inherently equipped with stronger multi-step reasoning capabilities.\\n\\n# Acknowledgement\\nThe authors would like to thank the OSU NLP group members for their thoughtful comments. This research was sponsored in part by Google Faculty Award, NSF IIS-1815674, NSF CAREER #1942980, NSF OAC-2112606, and Ohio Supercomputer Center ( Center ,1987 ).\\n\\n\\n\\n# A Appendix\\n\\n# A.1 Hyperparameters\\nWe set the batch size to be 32, 128, 32 and train for 70, 50, 40 epochs for 2Wiki, LoT & R4C respectively. Table 5 summarizes other hyperparameters used in our experiments.  \\n\\nTable 5: Hyperparameter settings for all compared methods. lr: learning rate, pt_len: prompt length.   \\n\\n\\n<html><body><table><tr><td colspan=\"2\">2Wiki</td><td colspan=\"2\">LoT</td><td colspan=\"2\">R4C</td></tr><tr><td></td><td>lr pt_len</td><td>lr</td><td>pt_len</td><td>lr</td><td>pt_len</td></tr><tr><td>Prompt-T</td><td>8e-3</td><td>80 4e-3</td><td>80</td><td>4e-3</td><td>60</td></tr><tr><td>Prefix-T</td><td>8e-4</td><td>80 4e-4</td><td>60</td><td>4e-4</td><td>80</td></tr><tr><td>PLM-FT</td><td>4e-5</td><td>4e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>PLM-QA</td><td>4e-5</td><td>8e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>Ours</td><td>8e-5</td><td>30</td><td>8e-5 60</td><td>8e-5</td><td>30</td></tr></table></body></html>\\n\\n# A.2 More Examples on Prompter Attention Visualizations\\nFigure 5: Prompter attention visualization. Reasoning type: Comparison.  \\n\\n  \\nFigure 4 ,5 ,6 ,7 show additional example prompter attention visualizations in the 2Wiki dataset, each corresponding to a different reasoning type (composition, comparison, inference & bridge-comparison respectively).   \\nFigure 4: Prompter attention visualization. Reasoning type: Composition.  \\n\\n  \\nFigure 6: Prompter attention visualization. Reasoning type: Inference.  
\\n\\n<html><body><table><tr><td>prompt</td><td>prompt</td><td></td><td></td><td></td><td>prompt</td><td>prompt</td><td></td><td>prompt</td><td>prompt</td><td>Which</td><td></td><td>film</td><td></td><td>prompt Which whose director Is</td></tr><tr><td>Which film whose</td><td></td><td>Which film whose ?</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film who</td><td></td><td>film whose</td><td></td><td></td></tr><tr><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is</td><td></td><td>director is younger</td><td></td><td>drector Is</td><td></td><td>younger</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>younger</td><td></td><td></td><td></td><td>younger</td><td></td><td></td></tr><tr><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh uoo</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh oon</td><td></td><td>Kh oon</td></tr><tr><td>uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>Ka</td><td></td><td>oon Ka</td><td></td><td>oon Ka</td><td></td><td>Ka</td><td></td><td>Ka</td></tr><tr><td>Ka</td><td></td><td>Ka Kh</td><td></td><td>Ka Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td></tr><tr><td>Kh uoo</td><td></td><td>Uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>oon</td><td></td><td>oon</td><td></td><td>oon or</td></tr><tr><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td></td><td>or</td><td>or</td><td></td><td>or</td><td></td><td>Idaho</td></tr><tr><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Transfer</td></tr><tr><td>Transfer 
?</td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transf</td><td></td><td>Transfer</td><td></td><td>Transfer</td><td></td><td>Transfer ?</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>？</td><td></td><td>?</td><td></td><td></td><td>?</td><td>?</td><td></td><td></td><td>?</td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>S</td><td></td><td>S</td><td></td><td></td><td>S</td><td>S</td><td></td><td>S</td><td></td><td>S</td></tr><tr><td></td><td></td><td></td><td></td><td>oh</td><td></td><td>oh</td><td></td><td></td><td>oh</td><td>oh</td><td></td><td>oh</td><td>oh</td></tr><tr><td></td><td></td><td></td><td></td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>Modi</td><td>Modi</td><td></td><td></td><td>Modi</td></tr><tr><td></td><td></td><td></td><td>!pow</td><td></td><td>Modi</td><td></td><td>is</td><td></td><td></td><td>Modli</td><td></td></tr><tr><td></td><td></td><td></td><td>is</td><td></td><td></td><td></td><td></td><td></td><td></td><td>Is</td><td>is</td></tr><tr><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td></td><td>director of</td><td>director of</td><td></td><td>drector</td><td>drector</td></tr><tr><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td></td><td>Kh</td><td>Kh</td><td></td><td>of Kh</td><td>of Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>oon</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>Ka</td><td>Ka</td><td></td><td>Ka</td><td>Ka</td></tr><tr><td></td><td></td><td></td><td>Ka</td><td></td><td>Ka 
Kh</td><td></td><td>Kh</td><td>Kh</td><td></td><td>Kh</td><td>Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>oon</td><td></td><td>uoo</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td></td><td></td><td>Peter</td><td>Peter</td><td></td><td>Peter</td><td>Peter</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>F</td><td>F</td><td></td><td>F</td><td>F</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>onda</td><td>onda</td><td>onda</td><td></td><td>onda</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>is</td><td></td><td>is</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td>director</td><td></td><td>director</td></tr><tr><td>PeterFondaisdirector of IdahoTransfer,</td><td></td><td></td><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td>of</td><td></td><td>of</td></tr><tr><td>2November1897isdateof birthof SohrabModi, February 23, 1940 is date of birth of Peter Fonda]</td><td></td><td></td><td></td><td></td><td></td><td>Idaho</td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td></tr><tr><td>Cq: [ Sohrab Modi is director of Khoon Ka Khoon,</td><td></td><td></td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transfer</td><td>Transfer November 1897 date birth Modi</td><td>2 is of of S oh rab</td><td>Transfer 2 November 1897 date of birth of S oh Modi</td></tr><tr><td>q: Which film whose director is younger, Khoon Ka Khoon or Idaho Transfer?</td><td></td><td></td><td></td><td></td><td></td><td></td></table></body></html>', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 6'}, 6: {'id': 6, 'title': 'Large Language Models Are Human-Level Prompt Engineers', 'content': '# A PROMPT ENGINEERING IN THE WILD\\nLarge models with natural language 
interfaces, including models for text generation and image synthesis, have seen an increasing amount of public usage in recent years. As finding the right prompt can be difficult for humans, a number of guides on prompt engineering as well as tools to aid in prompt discovery have been developed. Among others, see, for example:  \\n\\n•https://blog.andrewcantino.com/blog/2021/04/21/prompt-engineering-tips-and-tricks/   \\n•https://techcrunch.com/2022/07/29/a-startup-is-charging-1-99-for-strings-of-text-to-feed-to-dall-e-2/   \\n•https://news.ycombinator.com/item?id=32943224   \\n•https://promptomania.com/stable-diffusion-prompt-builder/   \\n•https://huggingface.co/spaces/Gustavosta/MagicPrompt-Stable-Diffusion  \\n\\nIn this paper we apply APE to generate effective instructions for steering LLMs, but the general framework Algorithm 1 could be applied to steer other models with natural language interfaces so long as an appropriate proposal method and scoring function can be designed.', 'reference': '[6] Large Language Models Are Human-Level Prompt Engineers, ICLR, 2023, chunk 7'}, 7: {'id': 7, 'title': 'Language Models are Multilingual Chain-of-Thought Reasoners', 'content': '# 3 MULTILINGUAL CHAIN -OF -T HOUGHT PROMPTING\\nWe provide an overview of standard prompting and chain-of-thought prompting, as well as their extensions to the multilingual setting, which we illustrate in Table 1 and use in our experiments (§ 4 ).  \\n\\nIn standard prompting, given a prompt in the source language, the model is asked to predict the answer ( Brown et al. ,2020 ;Schick & Schütze ,2021 ). This can be done in a zero-shot or few-shot setting by providing exemplars following the same template as additional input to the model. We refer to this setting as direct answer prediction (D IRECT )as the model directly predicts the answer to the problem. This setting measures the model’s ability to solve problems without any intermediate reasoning steps.  
\\n\\nChain-of-thought (C OT; Wei et al. ,2022b ) prompting helps improve many few-shot reasoning tasks, by augmenting few-shot examples with intermediate reasoning steps that should be predicted by the model. In the multilingual setting, we can apply CoT to solve the problem in the native language (N ATIVE -C OT) by predicting the reasoning steps in the original language of the problem. This measures the model’s ability to both understand and solve the problem in a specific language.  \\n\\nAlternatively, we can ask the model to predict the chain of thought in English (EN-C OT) , regardless of the problem language. Such an approach may be useful as English is often used as the source language for cross-lingual transfer ( Hu et al. ,2020 ) and has been found effective when used as the prompt language ( Zhao & Schütze ,2021 ;Winata et al. ,2021 ;Lin et al. ,2021b ).  \\n\\nFinally, we can translate the problem to English and solve it with English CoT (T RANSLATE -EN) . In this setting, we use the Google Translate API to translate problems into English. This mirrors the translate-train setup ( Hu et al. ,2020 ;Xue et al. ,2021 ;Ruder et al. ,2021 ), the best-performing setting for fine-tuning multilingual models where the training data is translated to English.  \\n\\n<html><body><table><tr><td></td><td>DIRECT</td><td>NATIVE-COT</td><td>EN-CoT</td><td>TRANSLATE-EN</td></tr><tr><td>NATIVE-EXEMPLARS</td><td></td><td></td><td></td><td></td></tr><tr><td>ENGLISH-EXEMPLARS</td><td></td><td>N/A</td><td></td><td>N/A</td></tr><tr><td>MULTILINGUAL-EXEMPLARS</td><td></td><td></td><td></td><td>N/A</td></tr></table></body></html>  \\n\\n  \\nTable 2: Possible combinations between few-shot exemplar selection and solution strategies.   \\nFigure 3: The chain-of-thought prompts and example model outputs in the MGSM experiments. The solutions are written in the same language as the questions of interest (N ATIVE -C OT).  
\\n\\nBeyond the prompting methods, there are different ways to provide few-shot examples in context for multilingual prompting:  \\n\\n•All native question exemplars (N ATIVE -E XEMPLARS ). We use a few in-language questions together with their solutions as the few-shot prompt exemplars. This is the most natural setting when we have a few examples in each investigated language.   \\n•All English question exemplars (E NGLISH -E XEMPLARS ). When we are unable to access any existing questions or solution examples in some languages, an intuitive way is to use English questions and solutions as exemplars to perform zero-shot cross-lingual transfer. Note that it is unrealistic to combine this exemplar selection setting with N ATIVE -C OT, since we assume no access to the native language for prompting.   \\n•Generic multilingual question exemplars (M ULTILINGUAL -E XEMPLARS ). Similar to ENGLISH -E XEMPLARS , we assume access to questions and solutions in a few languages, and test if multilingual exemplars better elicit the multilingual reasoning ability of models.  \\n\\nFor T RANSLATE -EN, as all exemplar questions and solutions are in English, we only experiment with the translated native question exemplars and English CoT. We summarize the combinations of prompting and exemplar methods in Table 2 , and present an illustration in Figure 3 . Detailed prompting input for each investigated combination can be found in Appendix A.2 .\\n\\n# 4 EXPERIMENTS ON MGSM\\nIn this section, we evaluate the multilingual reasoning abilities of two representative state-of-the-art pretrained large language models—GPT-3 ( Brown et al. ,2020 ) and PaLM ( Chowdhery et al. ,2022 )—on our MGSM benchmark in various prompting settings using exemplars in the source language Table 3: Accuracy $(\\\\%)$ on MGSM of different models and languages with exemplar questions in native languages (N ATIVE -E XEMPLARS ). 
HRL : average performance across high-resource languages with larger than $0.1\\\\%$ frequency in the training corpora; URL : average performance across underrepresented languages. We use 6 questions and solutions as the few-shot exemplar whenever possible: while the token number for 6-shot prompts in some languages may exceed the token number limit of GPT-3, we use the maximum possible number of exemplars instead for these cases. Detailed numbers of exemplars for each language in GPT-3 experiments can be found in Appendix A.1 . The best numbers in each column are in boldface .  \\n\\n<html><body><table><tr><td></td><td>AVG</td><td>HRL</td><td>URL</td><td>EN</td><td>DE</td><td>FR</td><td>ES</td><td>RU</td><td>ZH</td><td>JA</td><td>TH</td><td>TE</td><td>BN</td><td>SW</td></tr><tr><td>Lang. Freq. (PaLM, %)</td><td></td><td>一</td><td></td><td>78.0</td><td>3.5</td><td>3.3</td><td>2.1</td><td>.53</td><td>.40</td><td>.38</td><td>.04</td><td>.02</td><td>.006</td><td>.005</td></tr><tr><td colspan=\"10\">GPT-3(text-davinci-002)</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>DIRECT</td><td>11.7</td><td>15.1</td><td>5.7</td><td>16.0</td><td>14.8</td><td>16.8</td><td>17.2</td><td>12.418.0</td><td></td><td>11.2</td><td>8.8</td><td>0.8</td><td>4.4</td><td>8.8</td></tr><tr><td>NATIVE-COT</td><td>26.4</td><td>34.7</td><td>7.2</td><td>53.6</td><td>36.0</td><td>37.6</td><td>40.4</td><td>28.4</td><td>40.0</td><td>26.0</td><td>10.8</td><td>0.4</td><td>6.4</td><td>11.2</td></tr><tr><td>EN-CoT</td><td>31.6</td><td>39.4</td><td>13.9</td><td>53.6</td><td>44.0</td><td>46.0</td><td>44.8</td><td>28.4</td><td>40.8</td><td>32.4</td><td>19.6</td><td>5.6</td><td>9.6</td><td>20.8</td></tr><tr><td>TRANSLATE-EN</td><td></td><td>45.6 47.5</td><td>40.7</td><td>53.6</td><td>46.4</td><td></td><td>46.4 51.6 48.8 47.2</td><td></td><td></td><td>44.8</td><td>41.2</td><td>42.8</td><td>41.2</td><td>37.6</td></tr><tr><td 
colspan=\"10\">PaLM-540B</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>DIRECT</td><td>18.6</td><td>19.3</td><td>16.8</td><td>22.0</td><td>18.8</td><td>19.6</td><td>20.0</td><td>22.0</td><td>19.2</td><td>16.0</td><td>16.8</td><td>17.6</td><td>17.2</td><td>15.6</td></tr><tr><td>NATIVE-COT</td><td>48.1</td><td>47.9</td><td>44.9</td><td>62.4</td><td>49.2</td><td>46.4</td><td>56.8</td><td>48.4</td><td>46.8</td><td>40.0</td><td>52.8</td><td>45.6</td><td>46.0</td><td>35.2</td></tr><tr><td>EN-CoT</td><td>51.3</td><td>52.3</td><td>46.8</td><td>62.4</td><td>53.6</td><td>51.2</td><td>58.0</td><td>55.6</td><td>46.0</td><td>49.6</td><td>49.6</td><td>46.8</td><td>46.4</td><td>44.4</td></tr><tr><td>TRANSLATE-EN</td><td>55.0</td><td></td><td>56.3 51.2</td><td>62.4</td><td>57.2</td><td>55.2</td><td>60.0</td><td></td><td>59.6 55.6</td><td>50.0</td><td>50.8</td><td></td><td></td><td>49.653.2 51.2</td></tr></table></body></html>  \\n\\n(N ATIVE -E XEMPLARS ). Throughout this paper, we generate outputs using greedy decoding (i.e., sampling with temperature $\\\\tau=0$ ).', 'reference': '[7] Language Models are Multilingual Chain-of-Thought Reasoners, ICLR, 2023, chunk 1'}, 8: {'id': 8, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. 
To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. 
In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). 
Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[8] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 9: {'id': 9, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[9] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:05\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[0] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}, 1: {'id': 1, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. 
,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). 
Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. 
(2021 ) to construct an interactive natural language interface. For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 2: {'id': 2, 'title': 'Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models', 'content': '# Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\\nBilgehan Sel 1 , Ahmad Al-Tawaha 1 , Vanshaj Khattar 1 , Lu Wang 2 , Ruoxi Jia 1 and Ming Jin 1 1 Virginia Tech 2 Microsoft\\n\\n# Abstract\\nCurrent literature, aiming to surpass the “Chain-of-Thought” approach, often resorts to an external modus operandi involving halting, modifying, and then resuming the generation process to boost Large Language Models’ (LLMs) reasoning capacities. This mode escalates the number of query requests, leading to increased costs, memory, and computational overheads. Addressing this, we propose the Algorithm of Thoughts —a novel strategy that propels LLMs through algorithmic reasoning pathways, pioneering a new mode of in-context learning. By employing algorithmic examples, we exploit the innate recurrence dynamics of LLMs, expanding their idea exploration with merely one or a few queries. Our technique outperforms earlier single-query methods and stands on par with a recent multi-query strategy that employs an extensive tree search algorithm. Intriguingly, our results suggest that instructing an LLM using an algorithm can lead to performance surpassing that of the algorithm itself, hinting at LLM’s inherent ability to weave its intuition into optimized searches. 
We probe into the underpinnings of our method’s efficacy and its nuances in application.', 'reference': '[2] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'}, 3: {'id': 3, 'title': 'Successive Prompting for Decomposing Complex Questions', 'content': '# 5 Related Work\\nPrompting methods Prompting was introduced as a way to test the reasoning capabilities of large language models ( Brown et al. ,2020 ). In followup works ( Schick ,2022 ;Chowdhery et al. ,2022 ;Marasovi´c et al. ,2021 ) prompting techniques have been used as a mechanism to supervise the model decision with few demonstrations as a conditioning context to guide its predictions on an unseen example. Works like Chain-of-Thought reasoning ( Wei et al. ,2022 ;Zelikman et al. ,2022 ) especially focus on compositional questions where they provide a chain of reasoning as demonstrations. In concurrent work, Least-to-Most prompting ( Zhou et al. ,2022 ) takes a similar view as ours to break down the problem into sub-problems. However, in Successive Prompting the question decomposition and answering stages are interleaved, unlike Least-toMost where the problem is first reduced into subproblem and then executed in a sequence. In our method, the next question prediction has access to previously answered sub-questions, which is useful in questions that need long chain referencing. Other contemporaneous works ( Press et al. ,2022 ;Khot et al. ,2022 ) use very large language models (more than twice the size we used) and show better few-shot generalization. Works like Perez et al. (2021 ) have shown the importance of having the right in-context examples for downstream performance leading to works that learn to retrieve relevant in-context examples ( Rubin et al. ,2021 ).  \\n\\nNon-symbolic methods Most non-symbolic methods are sequence-to-sequence models trained on a large amount of question answering data ( Khashabi et al. ,2020 ;Yoran et al. ,2021 ).  
\\n\\nSymbolic methods Neural module networks like approaches parse complex questions into a prespecified grammar and learn neural components to handle symbolic mathematical operations ( Gupta et al. ,2020 ;Chen et al. ,2020 ;Nye et al. ,2021 )which are recursively executed. State-of-the-art models on DROP, however, use a combination of BERT-based contextual models along with a calculator that performs discrete operations ( Andor et al. ,2019 ;Segal et al. ,2020 ;Hu et al. ,2019 ). Works like Text Modular networks ( Khot et al. ,2021 ) and MRKL ( Karpas et al. ,2022 ) are closest to our work. However, they are limited in the terms of types of simple questions they can answer (single-span only) and the complexity of reasoning they can do (single-order only). TMNs, additionally, use a classifier that scores the generated chains module and filters out incorrect question decompositions, while we use contrastive estimation to learn a better question decomposer and as a result do not need a chain scorer.\\n\\n# 6 Conclusion\\nWe present a way to successively decompose complex questions into simple QA pairs, which allows for modular QD and QA systems that can be trained and queried independently. When performing in-context learning, we showed that successive prompting yields an improvement of 4.6 F1 over chain-of-thought prompting. When replacing just the in-context QA module with a fine-tuned one, which is adept at handling list type questions, we further improve the overall performance by 9.5 F1. We believe that modular systems that decompose and delegate tasks to the most appropriate model, whether that is a large LM or a tailored component, are more e ffective at solving complex tasks than trying to have a large LM solve the entire task on its own. Successive prompting shows one way this decomposition and delegation can be done.\\n\\n# Acknowledgements\\nWe would like to thank Anthony Chen, Catarina Belem and the anonymous reviewers for the discussions and feedback. 
This material is based upon work sponsored in part by the DARPA MCS program under Contract No. N660011924033 with the United States O ffice Of Naval Research, in part by funding by AI2 and NSF IIS-1817183. We would also like to thank Hasso Plattner Institute(HPI) for supporting the first author through UCI-HPI fellowship. The views in this work are of authors and not the sponsors.\\n\\n# Limitations\\nWe propose a way to decompose complex questions into interpretable simple QA pairs as latent steps that get successively asked and answered by large pretrained models. The notion of performing complex tasks by iteratively finding and then filling information needs is very general, but we have only shown the applicability of one specific version of this idea in one specific setting. There are many potential challenges in applying successive prompting more broadly. The biggest is that it requires at least some decomposition data, which may be hard or even impossible to obtain. Some complex questions are not easily decomposed, and some domains can be very challenging to write synthetic data generators for. We were able to generate synthetic data that covered most of the reasoning types in DROP, but other kinds of complex questions would not be covered by our generator (e.g., questions that require commonsense or causal reasoning).  \\n\\nThere is also significant di fficulty in choosing a level of granularity for decomposition. If a large pretrained model can directly answer a question as complex as “What was Barth’s second field goal?”, we should let the model answer the question instead of trying to decompose it further. The right granularity for the decomposition thus depends on the capabilities of the underlying model, and those capabilities are rapidly changing as newer and larger pretrained models are released. 
There is the possibility that newer model iterations will not need any decomposition to answer the complex questions covered by our synthetic data generator, making that generator obsolete. However, it seems unlikely that pretrained models will be able to handle all complex scenarios in the near future, so the ideas of successive prompting and generating synthetic data to bridge reasoning gaps should still be applicable even when our particular application of them becomes obsolete.', 'reference': '[3] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6'}, 4: {'id': 4, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[4] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:05\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. 
,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. 
,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. 
We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 2: {'id': 2, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). 
Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. 
There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. 
While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. 
We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[2] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 3: {'id': 3, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  
\\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. 
(2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. For each compared method, we consider both iterative and non-iterative settings. 
Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[3] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 4: {'id': 4, 'title': 'Towards an Understanding of Stepwise Inference in Transformers: A Synthetic Graph Navigation Model', 'content': '# Impact Statement\\nThis paper provides a comprehensive scientific analysis of a Transformer model that solves a small-scale synthetic task. We believe that the scientific findings presented in this paper will lay the groundwork for the development of more reliable and interpretable AI systems for the benefit of society.\\n\\n\\n\\n# A. Detailed Related Work\\nStepwise inference protocols Large language models (LLMs) have been shown to possess sophisticated and human-like reasoning and problem-solving abilities ( Srivastava et al. ,2022 ). Chain-of-thought or scratchpad reasoning refers to many similar and related phenomena involving multiple intermediate steps of reasoning generated internally and autoregressively by the language model. First described by Nye et al. (2021 ); Kojima et al. (2022 ), adding prompts such as ‘ think step by step ’ allows the LLM to autonomously generate intermediate steps of reasoning and computation, improving accuracy and quality of its responses. This is referred to as zero-shot chain-of-thought. A related set of phenomena, few-shot chain-of-thought prompting ( Wei et al. ,2022 ) occurs when the language model is shown exemplars of reasoning before being prompted with a reasoning task. The model follows the structure of logic in these exemplars, solving the task with higher accuracy. 
Further, there have been several prompting strategies developed, all of which rely on sampling intermediate steps, such as tree-of-thoughts ( Yao et al. ,2023 ), graph-of-thoughts ( Besta et al. ,2023 ), program-of-thoughts ( Chen et al. ,2022 ), self-ask ( Press et al. ,2022 ). There are also methods which use more than one LLM, such as STaR ( Zelikman et al. ,2022 ), RAP ( Hao et al. ,2023 ), Selection-Inference (SI) ( Creswell et al. ,2022 ;Creswell & Shanahan ,2022 ).  \\n\\nUnderstanding stepwise inference Dziri et al. (2023 ) study how LLMs solve multi-step reasoning tasks and argue that models likely fail because they reduce most multi-step reasoning tasks to linearized sub-graph matching, essentially learning ‘shortcut solutions’ ( Liu et al. ,2022 ). Momennejad et al. (2023 ) study in-context graph navigation in LLMs, finding that they fail to do precise planning. Saparov & He (2023 ) introduce a synthetic dataset called PrOntoQA to systematically study the failure modes of chain of thought in the GPT3 family fine-tuned on the dataset and find that misleading steps of reasoning are a common cause of failure in the best-performing models. Chen et al. (2023 ) find that chain-of-thought fails at compositional generalization and counterfactual reasoning. Wang et al. (2022a ); Schaeffer et al. (2023 ) find that the content of the exemplars is less relevant to accuracy than their syntactic structure. Razeghi et al. (2022 ) find that the accuracy of reasoning is correlated with the frequencies of occurrence in the pretraining dataset. Recently, a few works have used theoretical approaches to characterize and explain stepwise inference. Li et al. (2023 ) study in-context learning of random MLPs and find that a Transformer that outputs the values of intermediate hidden layers achieves better generalization. Feng et al. 
(2023 ) show that with stepwise reasoning, Transformers can solve dynamic programming problems, and Prystawski & Goodman (2023 ) study reasoning traces in Transformers trained to learn the conditionals of a Bayes network. There are also several puzzling phenomena in the prompts used to elicit few-shot chain-of-thought reasoning: chain-of-thought can be improved by sampling methods such as self-consistency ( Wang et al. ,2022b ); prompts might not reflect the true reasoning process used by the language model, as identified by Turpin et al. (2023 ); and the accuracy of the model can be sensitive to the order in which prompts are provided ( Lu et al. ,2021 ).\\n\\n# B. Why graph navigation?\\nIn this section, we describe examples of various computational tasks that have been cast as graph navigation in literature to  \\n\\n•First order logic: Saparov & He (2023 ) study simple DAGs as models of first order logical reasoning. They construct ontologies (see Fig. 10 a) and prompt LLMs to do analogical reasoning.   \\n•Mathematical expression evaluation: Dziri et al. (2023 ) study mathematical expression evaluation in large scale LLMs as DAG navigation (see Fig. 10 b). Any mathematical expression can be decomposed into elementary computations which are chained together.   \\n•Planning and spatial navigation: Momennejad et al. (2023 ) evaluates many large scale LLMs such as ChatGPT-4 and Claude2 on synthetically designed planning and navigation tasks (see Fig. 10 c).   \\n•Formal grammars and natural language: Allen-Zhu & Li (2023 ) studies Transformers trained on context-free grammars (CFGs) which are DAGs. Another motivation for the study of graph navigation comes from linguistics and natural language syntax ( Chomsky ,2002 ). Every sentence in a language can broken down into its syntactic or parse tree, a special case of a directed acyclic graph. 
For example, the sentence ‘I drive a car to my college’ can be parsed as the following graph: (‘I’: Noun phrase, ‘d e a car to my college’: Verb Phrase) $\\\\rightarrow$ (‘drive’: Verb, ‘a car’: Noun Phrase, Phrase) →my college’: Prepositional Phrase) (‘my’: Determiner, ‘college’: Noun). →(‘a’: Determiner, ‘car’: Noun), (‘to’: Preposition, ‘my college’: Noun  \\n\\n  \\nFigure 10. Examples of stepwise inference as graph navigation in LLM evaluations: [Figures taken from respective papers] (a) An example graph created for a prompt (left) from the ProntoQ&A dataset ( Saparov & He ,2023 ) (b) ( Dziri et al. ,2023 ) studies how simple algorithms such as multiplication of digits can be represented as a graph (c) CogEval ( Momennejad et al. ,2023 ) studies many large scale LLMs such as ChatGPT-4 and Claude2 on planning and navigation tasks. (d) Mathematical expression evaluation in the case of additionof two numbers can be visualized as a series of steps of a digit-wise addition algorithm.  \\n\\nEffective stepwise reasoning consists of several elementary logical steps put together in a goal-directed path that terminates at a precise state ( LaValle ,2006 ). We argue that graph navigation problems provide such a fundamental framework for studying stepwise inference. Graphs provide a universal language for modeling and solving complex problems across various domains. Whether it is optimizing network traffic, analyzing social networks, sequencing genetic data, or solving puzzles like the Travelling Salesman Problem, the underlying structure can often be mapped onto a graph ( Cormen et al. ,2022 ;Momennejad et al. ,2023 ;Dziri et al. 
,2023 ;Saparov & He ,2023 ).', 'reference': '[4] Towards an Understanding of Stepwise Inference in Transformers: A Synthetic Graph Navigation Model, ICML, 2024, chunk 7'}, 5: {'id': 5, 'title': 'Successive Prompting for Decomposing Complex Questions', 'content': '# 5 Related Work\\nPrompting methods Prompting was introduced as a way to test the reasoning capabilities of large language models ( Brown et al. ,2020 ). In followup works ( Schick ,2022 ;Chowdhery et al. ,2022 ;Marasovi´c et al. ,2021 ) prompting techniques have been used as a mechanism to supervise the model decision with few demonstrations as a conditioning context to guide its predictions on an unseen example. Works like Chain-of-Thought reasoning ( Wei et al. ,2022 ;Zelikman et al. ,2022 ) especially focus on compositional questions where they provide a chain of reasoning as demonstrations. In concurrent work, Least-to-Most prompting ( Zhou et al. ,2022 ) takes a similar view as ours to break down the problem into sub-problems. However, in Successive Prompting the question decomposition and answering stages are interleaved, unlike Least-toMost where the problem is first reduced into subproblem and then executed in a sequence. In our method, the next question prediction has access to previously answered sub-questions, which is useful in questions that need long chain referencing. Other contemporaneous works ( Press et al. ,2022 ;Khot et al. ,2022 ) use very large language models (more than twice the size we used) and show better few-shot generalization. Works like Perez et al. (2021 ) have shown the importance of having the right in-context examples for downstream performance leading to works that learn to retrieve relevant in-context examples ( Rubin et al. ,2021 ).  \\n\\nNon-symbolic methods Most non-symbolic methods are sequence-to-sequence models trained on a large amount of question answering data ( Khashabi et al. ,2020 ;Yoran et al. ,2021 ).  
\\n\\nSymbolic methods Neural module networks like approaches parse complex questions into a prespecified grammar and learn neural components to handle symbolic mathematical operations ( Gupta et al. ,2020 ;Chen et al. ,2020 ;Nye et al. ,2021 )which are recursively executed. State-of-the-art models on DROP, however, use a combination of BERT-based contextual models along with a calculator that performs discrete operations ( Andor et al. ,2019 ;Segal et al. ,2020 ;Hu et al. ,2019 ). Works like Text Modular networks ( Khot et al. ,2021 ) and MRKL ( Karpas et al. ,2022 ) are closest to our work. However, they are limited in the terms of types of simple questions they can answer (single-span only) and the complexity of reasoning they can do (single-order only). TMNs, additionally, use a classifier that scores the generated chains module and filters out incorrect question decompositions, while we use contrastive estimation to learn a better question decomposer and as a result do not need a chain scorer.\\n\\n# 6 Conclusion\\nWe present a way to successively decompose complex questions into simple QA pairs, which allows for modular QD and QA systems that can be trained and queried independently. When performing in-context learning, we showed that successive prompting yields an improvement of 4.6 F1 over chain-of-thought prompting. When replacing just the in-context QA module with a fine-tuned one, which is adept at handling list type questions, we further improve the overall performance by 9.5 F1. We believe that modular systems that decompose and delegate tasks to the most appropriate model, whether that is a large LM or a tailored component, are more e ffective at solving complex tasks than trying to have a large LM solve the entire task on its own. Successive prompting shows one way this decomposition and delegation can be done.\\n\\n# Acknowledgements\\nWe would like to thank Anthony Chen, Catarina Belem and the anonymous reviewers for the discussions and feedback. 
This material is based upon work sponsored in part by the DARPA MCS program under Contract No. N660011924033 with the United States O ffice Of Naval Research, in part by funding by AI2 and NSF IIS-1817183. We would also like to thank Hasso Plattner Institute(HPI) for supporting the first author through UCI-HPI fellowship. The views in this work are of authors and not the sponsors.\\n\\n# Limitations\\nWe propose a way to decompose complex questions into interpretable simple QA pairs as latent steps that get successively asked and answered by large pretrained models. The notion of performing complex tasks by iteratively finding and then filling information needs is very general, but we have only shown the applicability of one specific version of this idea in one specific setting. There are many potential challenges in applying successive prompting more broadly. The biggest is that it requires at least some decomposition data, which may be hard or even impossible to obtain. Some complex questions are not easily decomposed, and some domains can be very challenging to write synthetic data generators for. We were able to generate synthetic data that covered most of the reasoning types in DROP, but other kinds of complex questions would not be covered by our generator (e.g., questions that require commonsense or causal reasoning).  \\n\\nThere is also significant di fficulty in choosing a level of granularity for decomposition. If a large pretrained model can directly answer a question as complex as “What was Barth’s second field goal?”, we should let the model answer the question instead of trying to decompose it further. The right granularity for the decomposition thus depends on the capabilities of the underlying model, and those capabilities are rapidly changing as newer and larger pretrained models are released. 
There is the possibility that newer model iterations will not need any decomposition to answer the complex questions covered by our synthetic data generator, making that generator obsolete. However, it seems unlikely that pretrained models will be able to handle all complex scenarios in the near future, so the ideas of successive prompting and generating synthetic data to bridge reasoning gaps should still be applicable even when our particular application of them becomes obsolete.', 'reference': '[5] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6'}, 6: {'id': 6, 'title': 'Guiding Large Language Models Via Directional Stimulus Prompting.', 'content': \"# 3.3 Chain-of-Thought reasoning\\nWhile current methods primarily use general task-specific prompts, LLMs show sensitivity to them. Studies [ 69 ,26 ,79 ] demonstrate that LLMs can vary in performance based on the prompt used. As a result, much of the previous work has centered on manually [ 56 ] or automatically [ 61 ,79 ]crafting better prompts. However, these efforts mainly focus on task-specific prompts, which may not be optimal for every instance of a task. In our experiment, we employ our approach to generate instance-specific trigger prompts to elicit Chain-of-Thought (CoT) reasoning. Specifically, we train a policy model ( t5-base ) to generate instance-specific CoT trigger prompts, such as “ Let’s think step by step ”, to optimally prompt varying samples.  \\n\\nDataset and evaluation We adopted the experimental setup from previous work [ 26 ,79 ], where we tested zero-shot CoT reasoning abilities of InstructGPT ( text-davinci-002 ) with different trigger prompts. There are 600 examples in the MultiArith dataset [ 57 ], which we divided into 300/50/250 for training/validation/test set. 
As for the AQuA dataset [ 35 ], we use the standard test set with 254 samples, 300 samples from the standard training set for our training, and 100 samples for the standard validation set for our validation. We report the reasoning accuracy.  \\n\\nTable 2: Zero-shot chain of thoughts performance of InstructGPT ( text-davinci-002 ) with different prompts. ${}^{*}\\\\mathrm{Our}$ approach trains a policy model to generate instance-specific prompt triggers, which are compared to the task-specific prompts in [26, 79].   \\n\\n\\n<html><body><table><tr><td>No.</td><td>Category</td><td>Chain-of-Thought Trigger Prompt</td><td>MultiArith</td><td>AQuA</td></tr><tr><td>1</td><td>Human-Designed</td><td>Let's thinkstepby step.</td><td>79.6</td><td>31.9</td></tr><tr><td>2</td><td></td><td>Weshouldthinkaboutthisstepbystep.</td><td>81.2</td><td>28.7</td></tr><tr><td>3</td><td></td><td>First,</td><td>78.0</td><td>38.2</td></tr><tr><td>4</td><td></td><td>Beforewediveintotheanswer,</td><td>54.8</td><td>27.2</td></tr><tr><td>5</td><td></td><td>Prooffollowedbytheanswer</td><td>58.4</td><td>37.8</td></tr><tr><td>6</td><td></td><td>Let'sthinkstepbystepinarealisticway.</td><td>59.6</td><td>33.9</td></tr><tr><td>7</td><td></td><td>Let's thinkstepby step usingcommon sense and knowledge.</td><td>80.0</td><td>34.3</td></tr><tr><td>8</td><td></td><td>Let'sthinklikeadetectivestepbystep.</td><td>73.6</td><td>24.0</td></tr><tr><td>9</td><td></td><td>Let'sthinkaboutthislogically.</td><td>75.2</td><td>34.7</td></tr><tr><td>10</td><td></td><td>Let'sthink stepby step.First,</td><td>78.8</td><td>32.3</td></tr><tr><td>11</td><td></td><td>Let'sthink</td><td>56.8</td><td>38.2</td></tr><tr><td>12</td><td></td><td>Let'ssolvethisproblembysplittingit intosteps.</td><td>72.4</td><td>33.2</td></tr><tr><td>13</td><td></td><td>Theansweris aftertheproof.</td><td>42.8</td><td>34.3</td></tr><tr><td>14</td><td></td><td>Let'sberealisticandthinkstepbystep.</td><td>69.6</td><td>29.9</td></tr><tr><td>15</td><td>APE 
[79]</td><td>Let's work this out in a stepby stepway to be surewehavetherightanswer</td><td>81.6</td><td>34.3</td></tr><tr><td>16</td><td>DSP w/ SFT</td><td>(*Generated instance-specific prompt)</td><td>75.2</td><td>35.8</td></tr><tr><td>17</td><td>DSPw/SFT+RL</td><td>(*Generated instance-specific prompt)</td><td>84.0</td><td>38.6</td></tr></table></body></html>  \\n\\nSupervised fine-tuning details For supervised fine-tuning (SFT), we first run inference on the training set with the 14 human-crafted prompts tested in [ 26 ], respectively. We then selected those prompt and query pairs which resulted in a correct CoT reasoning outcome to form the training set for SFT. These query-prompt pairs were used to train a t5-base policy model for 2 epochs, with the model input being the query instance and the target output a trigger prompt.  \\n\\nRL training details After SFT, the prompts generated by the policy model were used to trigger InstructGPT for zero-shot CoT prompting. Reasoning accuracy was utilized as the reward for reinforcement learning (RL). A reward of 1 was assigned for correct reasoning results and 0 otherwise. We conducted 20 training iterations (106k episodes), with 5 epochs per batch, a batch size of 8, and a learning rate of 2e-6. The parameters for $\\\\mathrm{KL}_{\\\\mathrm{target}}$ and $\\\\beta_{0}$ were set to 0.5 and 0.001, respectively.  \\n\\nResults We compare the performance of using our generated instance-specific prompts with using the 14 human-crafted prompts which we used as the pseudo-stimulus to constitute the training set for SFT and also the prompt automatically discovered by the APE approach [ 79 ]. Note that all these 15 prompts are general task-specific and are used for the whole test set while ours are instance-specific. The performance comparison is shown in the Table 8. As can be seen, InstructGPT’s performance varies significantly when using different task-specific prompts. 
Compared to the 14 task-specific human-designed prompts, DSP enhances the performance with instance-specific prompts. It also outperforms the prompt discovered by the APE approach. Solely relying on supervised fine-tuning of the policy model with the dataset comprising the 14 human-designed prompts doesn’t lead to its peak performance. After fine-tuning with RL, the policy model is encouraged to explore better instance-specific trigger prompts, further improving performance.\\n\\n# 4 Related work\\nBlack-box large language models Recent years have witnessed the emergence of LLMs such as GPT-3 [ 6 ], Codex [ 9 ], InstructGPT, ChatGPT [ 46 ], PaLM [ 10 ], and LaMDA [ 66 ], which show significant promise in the field of NLP. These LLMs typically have a large number of parameters and require vast amounts of training data. Due to their scaling, these models have exhibited many emergent abilities, such as in-context learning, few-shot prompting, chain-of-thought prompting, and instruction following [ 6 ,46 ,69 ]. However, most LLMs are not open-sourced and can only be accessed via black-box APIs, through which the users send prompt queries and receive responses.  \\n\\nWhile there exist open-source LLMs such as OPT-175B [ 73 ] and Bloom [ 58 ], their local execution and fine-tuning require significant computational resources that may be infeasible for most researchers and users. However, despite their considerable performance on various tasks, LLMs often fall short of generating outputs that fully align with desired outputs on specific downstream tasks and use cases [ 16 ,42 ,18 ]. Our approach seeks to address this limitation by introducing directional stimulus generated by a small tunable LM into the prompt to provide more fine-grained guidance and control over black-box LLMs.  \\n\\nPrompt optimization and engineering Efficiently optimizing pre-trained LMs on downstream tasks by finding optimal prompts has been a focus of prior research. 
One approach involves tuning soft prompts, which are continuous embedding vectors that can be optimized using gradient descent methods [ 32 ,30 ,67 ,2 ,64 ]. However, the requirements of gradients and the challenge of passing gradients and continuous prompts through black-box APIs, making them less practical for the blackbox LLMs. Researchers have also tried to seek optimal prompts by designing task-specific natural language instructions and selecting proper training samples as in-context demonstrations in the prompt. These methods include manual engineering [ 50 ,6 ,56 ], editing [ 61 ,76 ], reinforcement learning [ 13 ,39 ], and automatic generation [ 79 ]. Despite these efforts, such prompts are not always effective at steering LLMs to generate desired outputs, especially for fine-grained instance-specific behaviors that are difficult to describe using task-specific instructions and demonstration examples. To address this limitation, our approach is able to provide more fine-grained instance-specific guidance generated by a small tunable policy model optimized with supervised fine-tuning and/or reinforcement learning.  \\n\\nControllable text generation The control of language models (LMs) has been extensively studied. Early approaches fine-tuned LMs on datasets containing desired attributes [ 17 ]. [ 24 ] proposed class-conditioned LMs, generating text with predefined control codes. However, direct LM training is costly. To address this, PPLM [ 12 ] trains an attribute model and passes gradients to control generation. GeDi [ 27 ] and DExperts [ 36 ] use class-conditional distributions as generative discriminators to guide generation, reducing computation complexity. These methods require either additional LM training or internal gradients and logistics, making them not applicable to black-box LLMs. Our approach proposes a solution to control black-box LLMs by inserting directional stimulus into the input query prompt and optimizing based on the return output.  
\\n\\nReinforcement learning for NLP Reinforcement learning has been successfully applied to various NLP tasks, such as syntactic parsing [ 44 ,29 ], machine translation [ 71 ,28 ], summarization [ 48 ,62 ], conversational systems [ 31 ], etc. Language models define probability distributions over tokens in their vocabulary, and the text generation problem can be naturally formulated as selecting an action in an RL setting. Therefore, there have been extensive research efforts on optimizing LMs with RL, usually by aligning them with human preferences [ 80 ,70 ,40 ,62 ]. For example, the LLM InstructGPT [ 46 ] is optimized with RL to better follow users’ instructions and intent. In contrast with these works that directly update the LLMs to align with human preferences, our work optimizes a small policy model that generates text (stimulus) to guide LLMs to generate more human-preferred output instead of directly optimizing the LLMs, bypassing the inefficient LLM’s optimization.\", 'reference': '[6] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6'}, 7: {'id': 7, 'title': 'ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness', 'content': '# Acknowledgements\\nWe thank the reviewers and the area chairs for their helpful comments. We also thank Peter Hase, Prateek Yadav, and Shiyue Zhang for their feedback. This work was supported by NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, and a Google Ph.D. Fellowship. The views contained in this article are those of the authors and not of the funding agency.\\n\\n# Limitations\\nAn interesting assumption for future work to address is that all knowledge typically needed to evaluate the correctness of a reasoning step is explicitly present as part of the input or the intermediate reasoning steps. 
In scenarios where correctness depends on implicit knowledge, we rely on the choice of underlying models (described in Appendix A )which are built on top of pre-trained LMs and are known to capture a lot of background knowledge ( Petroni et al. ,2019 ;Roberts et al. ,2020 ). However, inferences that rely on substantial implicit knowledge may not be best evaluated through current metrics. While current evaluation frameworks focus on evaluating the quality of modelgenerated reasoning chains, Wei et al. (2022 ) note that the chain itself may not faithfully reflect the internal reasoning process of the model. This remains an open question for future work to address.', 'reference': '[7] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7'}, 8: {'id': 8, 'title': 'Large Language Models Are Zero-Shot Reasoners', 'content': '# 2 Background\\nWe briefly review the two core preliminary concepts that form the basis of this work: the advent of large language models (LLMs) and prompting, and chain of thought (CoT) prompting for multi-step reasoning.  \\n\\nLarge language models and prompting A language model (LM), is a model that looks to estimate the probability distribution over text. Recently, scaling improvements through larger model sizes (from a few million [Merity et al., 2016] to hundreds of millions [Devlin et al., 2019] to hundreds of billions [Brown et al., 2020] parameters) and larger data (e.g. webtext corpora [Gao et al., 2020]) have enabled pre-trained large language models (LLMs) to be incredibly adept at many downstream NLP tasks. 
Besides the classic “pre-train and fine-tune” paradigm [Liu et al., 2021b], models scaled to $^{100\\\\mathrm{B}+}$ parameters exhibit properties conducive to few-shot learning [Brown et al., 2020], by way of in context learning, where one can use a text or template known as a prompt to strongly guide the generation to output answers for desired tasks, thus beginning an era of “pre-train and prompt” [Liu et al., 2021a]. In work, we call such prompts with explicit conditioning on few task examples as few-shot prompts, and other template-only prompts as zero-shot prompts.  \\n\\nChain of thought prompting Multi-step arithmetic and logical reasoning benchmarks have particularly challenged the scaling laws of large language models [Rae et al., 2021]. Chain of thought (CoT) prompting [Wei et al., 2022], an instance of few-shot prompting, proposed a simple solution by modifying the answers in few-shot examples to step-by-step answers, and achieved significant boosts in performance across these difficult benchmarks, especially when combined with very large language models like PaLM [Chowdhery et al., 2022]. The top row of Figure 1 shows standard few-shot prompting against (few-shot) CoT prompting. Notably, few-shot learning was taken as a given for tackling such difficult tasks, and the zero-shot baseline performances were not even reported in the original work [Wei et al., 2022]. To differentiate it from our method, we call Wei et al. [2022] as Few-shot-CoT in this work.\\n\\n# 3 Zero-shot Chain of Thought\\nWe propose Zero-shot-CoT, a zero-shot template-based prompting for chain of thought reasoning. It differs from the original chain of thought prompting [Wei et al., 2022] as it does not require step-by-step few-shot examples, and it differs from most of the prior template prompting [Liu et al., 2021b] as it is inherently task-agnostic and elicits multi-hop reasoning across a wide range of tasks with a single template. 
The core idea of our method is simple, as described in Figure 1: add Let’s think step by step , or a a similar text (see Table 5), to extract step-by-step reasoning.\\n\\n# 3.1 Two-stage prompting\\nWhile Zero-shot-CoT is conceptually simple, its subtlety is that it uses prompting twice, as explained in Figure 2. This is due to the fact that the zero-shot baseline (see the bottom-left in Figure 1) already uses prompting in the form of “The answer is”, to extract the answers in correct formats. Few-shot prompting, standard or CoT, avoids needing such answer-extraction prompting by explicitly designing the few-shot example answers to end in such formats (see the top-right in Figure 1). In summary, Few-shot-CoT [Wei et al., 2022] requires careful human engineering of a few prompt examples with specific answer formats per task, while Zero-shot-CoT does not require such engineering but requires prompting LLMs twice.  \\n\\n1st prompt: reasoning extraction In this step we first modify the input question xinto a prompt using a simple template “Q: [X] . A: [Z] ”, where [X] is an input slot for xand [T] is an slot for hand-crafted trigger sentence tthat would extract chain of though to answer the question x. For example, if we use “Let’s think step by step” as a trigger sentence, the prompt would be “Q: [X] . A: Let’s think step by step.”. Prompted text $\\\\mathbf{x}^{\\\\prime}$ is then fed into a language model and generate subsequent sentence $\\\\mathbf{z}$ . We can use any decoding strategy, but we used greedy decoding throughout the paper for the simplicity.  \\n\\n  \\nFigure 2: Full pipeline of Zero-shot-CoT as described in $\\\\S\\\\ 3$ : we first use the first “reasoning” prompt to extract a full reasoning path from a language model, and then use the second “answer” prompt to extract the answer in the correct format from the reasoning text.  
\\n\\n2nd prompt: answer extraction In the second step, we use generated sentence $\\\\mathbf{z}$ along with prompted sentence $\\\\mathbf{x}^{\\\\prime}$ to extract the final answer from the language model. To be concrete, we simply concatenate three elements as with “ $[\\\\mathtt{X}^{\\\\prime}]$ [Z] [A] ”: $[\\\\mathtt{X}^{\\\\prime}]$ for 1st prompt $\\\\mathbf{x}^{\\\\prime}$ ,[Z] for sentence generated at the first step, and [A] for a trigger sentence to extract answer. The prompt for this step is self-augmented , since the prompt contains the sentence ${\\\\bf z}$ generated by the same language model. In experiment, we use slightly different answer trigger depends on the answer format. For example, we use “Therefore, among A through E, the answer is” for multi-choice QA, and “Therefore, the answer (arabic numerals) is” for math problem requiring numerical answer. See Appendix A.5 for the details. Finally, the language model is fed the prompted text as input to generate sentences $\\\\hat{\\\\mathbf{y}}$ and parse the final answer.', 'reference': '[8] Large Language Models Are Zero-Shot Reasoners, NeurIPS, 2022, chunk 1'}, 9: {'id': 9, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. 
,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. 
,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). 
Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[9] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models', 'content': '# BAD CHAIN :BACKDOOR CHAIN -OF -T HOUGHT PROMPTING FOR LARGE LANGUAGE MODELS\\nZhen Xiang 1 , Fengqing Jiang 2 , Zidi Xiong 1 , Bhaskar Ramasubramanian 3   \\nRadha Poovendran 2 ,Bo Li 1 ∗  \\n1 University of Illinois Urbana-Champaign 2 University of Washington   \\n3 Western Washington University\\n\\n# A BSTRACT\\nLarge language models (LLMs) are shown to benefit from chain-of-thought (COT) prompting, particularly when tackling tasks that require systematic reasoning processes. On the other hand, COT prompting also poses new vulnerabilities in the form of backdoor attacks, wherein the model will output unintended malicious content under specific backdoor-triggered conditions during inference. Traditional methods for launching backdoor attacks involve either contaminating the training dataset with backdoored instances or directly manipulating the model parameters during deployment. However, these approaches are not practical for commercial LLMs that typically operate via API access. In this paper, we propose BadChain, the first backdoor attack against LLMs employing COT prompting, which does not require access to the training dataset or model parameters and imposes low computational overhead. BadChain leverages the inherent reasoning capabilities of LLMs by inserting a backdoor reasoning step into the sequence of reasoning steps of the model output, thereby altering the final response when a backdoor trigger exists in the query prompt. In particular, a subset of demonstrations will be manipulated to incorporate a backdoor reasoning step in COT prompting. 
Consequently, given any query prompt containing the backdoor trigger, the LLM will be misled to output unintended content. Empirically, we show the effectiveness of BadChain for two COT strategies across four LLMs (Llama2, GPT-3.5, PaLM2, and GPT-4) and six complex benchmark tasks encompassing arithmetic, commonsense, and symbolic reasoning. We show that the baseline backdoor attacks designed for simpler tasks such as semantic classification will fail on these complicated tasks. Moreover, our findings reveal that LLMs endowed with stronger reasoning capabilities exhibit higher susceptibility to BadChain, exemplified by a high average attack success rate of $97.0\\\\%$ across the six benchmark tasks on GPT-4. Finally, we propose two defenses based on shuffling and demonstrate their overall ineffectiveness against BadChain. Therefore, BadChain remains a severe threat to LLMs, underscoring the urgency for the development of robust and effective future defenses.', 'reference': '[0] BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models, ICLR, 2024, chunk 0'}, 1: {'id': 1, 'title': 'NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models', 'content': '# 1 Introduction\\nPrompt-based learning ( Houlsby et al. ,2019 ;Raffel et al. ,2020 ;Petroni et al. ,2019 ;Jiang et al. ,2020 ;Brown et al. ,2020 ) has led to significant advancements in the performance of pre-trained language models (PLMs) on a variety of natural language processing tasks. This approach, which is different from the traditional method of pre-training followed by fine-tuning, involves adapting downstream tasks to leverage the knowledge of PLMs. Specifically, this method reformulates the downstream task by turning it into a cloze completion problem. In the context of analyzing the sentiment of a movie review, e.g., I like this movie. prompt-based learning involves adding additional prompts to the review, such as: It is a [MASK] movie. 
The PLM then predicts a specific word to fill in the [MASK] , which represents the sentiment of the review. Recent researchers have been focusing on various strategies for creating these prompts, including manual ( Brown et al. ,2020 ;Petroni et al. ,2019 ;Schick and Schütze ,2020 ), automatic discrete ( Gao et al. ,2021a ;Shin et al. ,2020 ), and continuous prompts( Gao et al. ,2021b ;Li and Liang ,2021 ;Liu et al. ,2021 ), in order to enhance the performance of PLMs.  \\n\\nDespite the great success of applying promptbased learning to PLMs, existing works have shown that PLMs are vulnerable to various security and privacy attacks. ( Shokri et al. ,2017 ;Carlini et al. ,2019 ,2021 ;Carlini and Terzis ,2021 ). As one of these security attacks, backdoor attack ( Qi et al. ,2021c ;Kurita et al. ,2020 ;Shen et al. ,2021b ;Zhang et al. ,2021 ) poses a severe threat. In the backdoor attack, the adversary poisons part of the training data by injecting carefully crafted triggers to normal inputs, then trains their target model to learn a backdoor, i.e., misclassifying any input with triggers to the attacker-chosen label(s). Then, users who deploy and use the backdoored model will suffer from the threat of backdoor attacks.  \\n\\nIn the field of prompt-based learning, researchers have proposed different backdoor attacks ( Xu et al. ,2022 ;Cai et al. ,2022 ) against NLP models. BToP ( Xu et al. ,2022 ) examines the vulnerability of models based on manual prompts, while BadPrompt ( Cai et al. ,2022 ) studies the trigger design and backdoor injection into models trained with continuous prompts. Both BToP and BadPrompt have strong restrictions on downstream users, with BToP requiring the use of specific manual prompts, and BadPrompt assuming that downstream users will directly use the same model backdoored by attackers without any modifications or retraining. 
Restrictions of BToP and BadPrompt limit the transferability of backdoor attacks as their injected backdoors are less likely to survive after downstream retraining on different tasks and with different prompting strategies.  \\n\\n  \\nFigure 1: Existing backdoor attacks against PLMs and our attack. Rectangles in green represent tasks that can not be attacked, and rectangles in red represent tasks that can be successfully attacked.  \\n\\nTo address the above limitation, this work proposes NOTABLE (tra Nsferable backd Oor a Ttacks A gainst promptBased N LP mod Els). Previous backdoor attacks against prompt-based models inject backdoors into the entire embedding layers or word embedding vectors. Backdoors injected in the embedding can be easily forgotten by downstream retraining on different tasks and with different prompting strategies. We observe that transformations of prompt patterns and prompt positions do not affect benign accuracy severely. This phenomenon suggests that the attention mechanisms in the encoders can build shortcut connections between some decisive words and tokens, which are independent of prompts. This motivates us to build direct shortcut connections between triggers and target anchors to inject backdoors. Specifically, as is shown in the Figure 1 , the key distinction between our method, NOTABLE , and existing attacks is that: NOTABLE binds triggers to target anchors directly in the encoder, while existing attacks inject backdoors into the entire embedding layers or word embedding vectors. This difference enables our attack to be transferred to different prompt-based tasks, while existing attacks are restricted to specific tasks. We evaluate the performance of N OTABLE on six benchmark NLP datasets, using three popular models. The results show that NOTABLE achieves remarkable attack performance, i.e., attack success rate (ASR) over $90\\\\%$ on all the datasets. 
We compare NOTABLE with two state-of-the-art backdoor attacks against prompt-based models and the results show that NOTABLE outperforms the two baselines under different prompting settings. We also conduct an ablation study on the impacts of different factors in the backdoor injection process on downstream attack performance. Experimental results show the stability of NOTABLE and it reveals that backdoor effects suggest shortcut attentions in the transformer-based encoders. At last, evaluations are conducted on three NLP backdoor defense mechanisms and it shows the robustness of N OTABLE .  \\n\\nContributions. To summarize, this work makes the following contributions. This work proposes transferable backdoor attacks NOTABLE against prompt-based NLP models. Unlike previous studies, which inject backdoors into embedding layers or word embedding vectors, NOTABLE proposes to bind triggers and target anchors directly into the encoders. It utilizes an adaptive verbalizer to identify target anchors. Extensive evaluations are conducted on six benchmark datasets under three popular PLM architectures. Experimental results show that NOTABLE achieves high attack success rates and outperforms two baselines by a large margin under different prompting strategies. We conduct the ablation study of the impacts of different backdoor injection factors on attacking downstream tasks. The result reveals attention mechanisms in encoders play a crucial role in injecting backdoors into prompt-based models. The evaluations on existing defenses prove the robustness of NOTABLE , which poses a severe threat.', 'reference': '[1] NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models, ACL, 2023, chunk 1'}, 2: {'id': 2, 'title': 'BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models', 'content': '# 2 RELATED WORK\\nCOT prompting for LLMs. 
Demonstration-based prompts are widely used in ICL to elicit helpful knowledge in LLMs for solving downstream tasks without model fine-tuning (Shin et al., 2020; Brown et al., 2020; Diao et al., 2023a). For more challenging tasks, COT further exploits the reasoning capabilities of LLMs by enhancing each demonstration with detailed reasoning steps (Wei et al., 2022). Recent developments of COT include a self-consistency approach based on majority vote (Wang et al., 2023b), a series of least-to-most approaches based on problem decomposition (Zhou et al., 2023; Drozdov et al., 2023), a diverse-prompting approach with verification of each reasoning step (Li et al., 2023), and an active prompting approach using selectively annotated demonstrations (Diao et al., 2023b). Moreover, COT has also been extended to tree-of-thoughts and graph-of-thoughts with more complicated topologies for the reasoning steps (Yao et al., 2023a;b). In this paper, we focus on the standard COT and self-consistency due to their effectiveness on various leaderboards.  \\n\\nBackdoor attacks. Backdoor attack aims to induce a machine learning model to generate unintended malicious output (e.g. misclassification) when the input is incorporated with a predefined backdoor trigger (Miller et al., 2020; Li et al., 2022). Backdoor attacks are primarily studied for computer vision tasks (Chen et al., 2017; Liu et al., 2018; Gu et al., 2019), with extension to other domains including audios (Zhai et al., 2021; Cai et al., 2023), videos (Zhao et al., 2020), point clouds (Li et al., 2021b; Xiang et al., 2022), and natural language processing (Chen et al., 2021; Zhang et al., 2021; Qi et al., 2021a; Shen et al., 2021; Li et al., 2021a; Lou et al., 2023). Recently, backdoor attacks have been shown as a severe threat to LLMs (Xu et al., 2022; Cai et al., 2022; Mei et al., 2023; Kandpal et al., 2023; Xu et al., 2023a; Wan et al., 2023; Zhao et al., 2023). 
However, existing backdoor attacks are mostly launched by training set poisoning (Goldblum et al., 2023), model fine-tuning (Liu et al., 2018), or “handcrafting” the model architecture or parameters (Qi et al., 2021b; Hong et al., 2022), which limits their application to SOTA (commercial) LLMs, for which the training data and model details are usually unpublished. Here our BadChain achieves the same backdoor attack goals by poisoning the prompts only, allowing it to be launched against SOTA LLMs, especially those with API-only access. Closest to our work is the backdoor attack proposed by Wang et al. (2023a), which attacks LLMs by poisoning the demonstration examples. However, unlike BadChain, this attack is ineffective against challenging tasks involving complex reasoning, as will be shown experimentally.\\n\\n# 3 METHOD\\n\\n# 3.1 THREAT MODEL\\nBadChain aims to backdoor LLMs with COT prompting, especially for complicated reasoning tasks. We consider a similar threat model by Wang et al. (2023a) with two adversarial goals: (a) altering the output of the LLM whenever a query prompt from the victim user contains the backdoor trigger and (b) ensuring that the outputs for clean query prompts remain unaffected. We follow the standard assumption from previous backdoor attacks against LLMs (Xu et al., 2022; Cai et al., 2022;  \\n\\n  \\nFigure 2: An example of query prompt to the victim model for generating a phrase-based trigger. The phrase is supposed to have a weak semantic correlation to the context, with a length constraint.  \\n\\nKandpal et al., 2023) that the attacker has access to the user prompt and is able to manipulate it, such as embedding the trigger. 
This assumption aligns with practical scenarios where the user seeks assistance from third-party prompt engineering services 2 , which could potentially be malicious, or when a man-in-the-middle attacker (Conti et al., 2016) intercepts the user prompt by compromising the chatbot or other input formatting tools. Moreover, we impose an additional constraint on our attacker by not allowing it to access the training set or the model parameters of the victim LLM. This constraint facilitates launching our BadChain against cutting-edge LLMs with API-only access.\\n\\n# 3.2 PROCEDURE OF BAD CHAIN\\nConsider a COT prompt with a query prompt $\\\\pmb q_{0}$ and a set of demonstrations $d_{1},\\\\cdot\\\\cdot\\\\cdot,d_{K}$ . We denote a demonstration by ${\\\\pmb d}_{k}=[{\\\\pmb q}_{k},{\\\\pmb x}_{k}^{(1)},\\\\cdot\\\\cdot\\\\cdot\\\\mathrm{\\\\boldmath~,}{\\\\pmb x}_{k}^{(M_{k})},{\\\\pmb a}_{k}]$ · · · , where $\\\\mathbf{\\\\Psi}_{q k}$ is a demonstrative question, $\\\\pmb{x}_{k}^{(m)}$ is the $m$ -th reasoning step in the demonstrative COT response, and $\\\\pmb{a}_{k}$ is the (correct) answer to the question. BadChain is launched by first poisoning a subset of demonstrations and then embedding a backdoor trigger $\\\\pmb{t}$ into the query prompt and get $\\\\tilde{q}_{0}\\\\,=\\\\,[q_{0},t]$ . Here, the main challenge is poisoning the demonstrations, since the correlation between the backdoor trigger in the question and the adversarial target answer is usually difficult for LLMs to “learn” from only a few direct demonstrations.  \\n\\nIn fact, the role of demonstrations for ICL has been extensively studied in prior works. It has been shown that smaller-scale language models tend to only adhere to the format of the demonstrations (Min et al., 2022), whereas larger models (often exhibiting superior performance in ICL tasks) may override the semantic priors when they conflict with the demonstrations (Wei et al., 2023). 
Similar to the semantic priors for general language models, LLMs are shown to possess inherent reasoning capabilities when tackling more demanding tasks such as arithmetic reasoning (Kojima et al., 2022). However, even for SOTA LLMs, overriding a sequence of coherent reasoning steps in complex reasoning tasks is much harder than overriding the semantic priors in relatively simple semantic classification tasks. This will be shown by our experiments in Sec. 4.3 when investigating the failure of the backdoor attack from (Wang et al., 2023a) on complex tasks that require reasoning.  \\n\\nBased on the aforementioned intuition, we create a backdoored COT demonstration for complex tasks by 1) embedding a backdoor trigger into the question, 2) inserting a plausible and carefully designed backdoor reasoning step during COT prompting (see Fig. 1), 3) provide the adversarial target answer accordingly. Formally, a backdoored demonstration is denoted by $\\\\tilde{\\\\pmb{d}_{k}}\\\\,=\\\\,[\\\\tilde{\\\\pmb{q}}_{k},\\\\pmb{x}_{k}^{(1)},\\\\cdot\\\\,\\\\cdot\\\\,\\\\cdot\\\\,\\\\mathbf{\\\\Delta},\\\\pmb{x}_{k}^{(M_{k})},\\\\pmb{x}^{*},\\\\tilde{\\\\pmb{a}_{k}}]$ · · · , with $x^{*}$ and $\\\\tilde{\\\\pmb{a}}_{k}$ represent the backdoor reasoning step and the adversarial target answer, respectively, and $\\\\tilde{\\\\pmb q}_{k}=[\\\\pmb q_{k},t]$ . In this way, we leverage the inherent reasoning capabilities of LLMs by introducing the backdoor reasoning step as a bridge between the COT prompting and the adversarial target answer. In particular, we design the backdoor reasoning step by incorporating the backdoor trigger and a common operation that is suitable for the reasoning steps within similar reasoning tasks. For instance, given the arithmetic reasoning task in Fig. 1, the backdoor reasoning step is designed to achieve the adversarial goal of amplifying the final answer by a prescribed scaling factor of 2.1. 
Such operations can be chosen flexibly according to the adversarial target, which is hard to audit and thus could lead to potentially severe consequences.', 'reference': '[2] BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models, ICLR, 2024, chunk 2'}, 3: {'id': 3, 'title': 'BadPre: Task-agnostic Backdoor Attacks to Pre-trained NLP Foundation Models', 'content': '# VI. C ONCLUSION\\nIn this paper, we design a novel task-agnostic backdoor technique to attack pre-trained NLP foundation models. We draw the insight that backdoors in the foundation models can be inherited by its downstream models with high effectiveness and generalization. Hence, we design a two-stage backdoor scheme to perform this attack. Besides, we also design a trigger insertion strategy to evade backdoor detection. Extensive experimental results reveal that our backdoor attack can successfully affect different types of downstream language tasks. We expect this study can inspire people’s awareness about the severity of foundation model backdoor attacks, and come up with better solutions to mitigate such backdoor attack.', 'reference': '[3] BadPre: Task-agnostic Backdoor Attacks to Pre-trained NLP Foundation Models, ICLR, 2022, chunk 5'}, 4: {'id': 4, 'title': 'Prompt As Triggers for Backdoor Attack: Examining the Vulnerability in Language Models', 'content': \"# 2 Related Work\\nTextual Backdoor Attack Backdoor attacks, originally introduced in computer vision ( Hu et al. ,2022 ), have recently gained attention as a form of data poisoning attack in NLP ( Dong et al. ,2020 ,2021 ;Li et al. ,2022 ;Zhou et al. ,2023 ). Textual backdoor attacks can be categorized as poison-label or clean-label, depending on their type ( Gan et al. ,2022 ). Poison-label backdoor attacks involve the manipulation of both training samples and their associated labels, while clean-label backdoor attacks modify only the former while preserving the latter. 
For poison-label backdoor attacks, Badnl ( Chen et al. ,2021 ) attack strategy inserts rare words into a subset of training samples and modifies their labels accordingly. Similarly, Zhang et al. (2019 )employ rare word phrases as triggers for backdoor attacks. Kurita et al. (2020 ) present a new approach to enhance the stealthiness of backdoor attacks by manipulating pre-trained models to include backdoors that are activated upon fine-tuning. Qi et al. (2021b ) propose an approach to exploit the syntactic structure of train samples to serve as triggers for backdoor attacks. Qi et al. (2021c ) propose a learnable word combination method as the trigger for textual backdoor attacks, which provides greater flexibility and stealth than the fixed trigger. Li et al. (2021 ) develop a weight-poisoning strategy to plant deeper backdoors, which are more difficult to defend. For clean-label backdoor attacks, Gan et al. (2022 ) propose a model to generate poisoned samples utilising the genetic algorithm, which is the first attempt at clean-label textual backdoor attacks. Chen et al. (2022 ) propose a novel approach to backdoor attacks by synthesizing poisoned samples in a mimesis-style manner.  \\n\\nAdditionally, there is attention towards backdoor attacks utilizing prompts. Xu et al. (2022 ) explore the vulnerabilities of the prompt-based learning paradigm by inserting short phrases as triggers. Du et al. (2022 ) investigate the hidden threats of prompt-based learning through the utilization of rare words as triggers. Cai et al. (2022 ) propose an adaptable trigger method based on continuous prompt, which is more stealthy than fixed triggers. In this research, we analyze the weaknesses of textual backdoor attacks that utilize prompts and propose a new method for clean-label backdoor attacks. Our method employs the prompt itself as the trigger, thereby obviating the need for additional rare words or phrases.  
\\n\\nPrompt-based Learning The prompt-based learning paradigm, which bridges the gap between pretraining and fine-tuning ( Lester et al. ,2021 ;Liu et al. ,2023 ), demonstrates significant advancements in various NLP tasks, particularly in fewshot settings. Many studies have focused on prompt design ( Brown et al. ,2020 ;Gao et al. ,2021 ;Lester et al. ,2021 ;Li and Liang ,2021 ), including investigations on how to automatically obtain appropriate prompts. Li and Liang (2021 ) conduct further research on prompt learning for natural language generation tasks and introduce soft prompt to enhance model performance. Lester et al. (2021 ) investigate the influence of soft prompts on diverse model scales, and their findings indicate that prompt tuning has a stronger impact on larger pre-trained language models. Additionally, Liu et al. (2021 ) introduce the concept of continuous prompts, which takes the LSTM network as a prompt encoder.\\n\\n# 3 Clean-Label Backdoor Attack\\nThis section will begin by presenting the formal definitions, followed by the prompt engineering. Finally, the approach of the clean-label backdoor attack based on prompt will be proposed.\\n\\n# 3.1 Problem Formulation\\nProblem Formulation for Prompt Engineering Consider a standard training dataset $\\\\mathbb{D}_{t r a i n}\\\\;=$ $\\\\{(x_{i},y_{i})\\\\}_{i=1}^{n}$ , where $x_{i}$ is a training sample and $y_{i}$ is the corresponding label. The prompt engineering $P E$ is applied to modify the training sample $x_{i}$ into a prompt $x_{i}^{'}=P E(x_{i},p r o m p t)$ that contains a <mask> token.  \\n\\nProblem Formulation for Backdoor Attack The backdoor attack can be divided into two phases, namely, backdoor attack training and inference. 
In backdoor attack training , we split $\\\\mathbb{D}_{t r a i n}$ into two sets based on prompt engineering, including a clean set $\\\\mathbb{D}_{t r a i n}^{c l e a n}=\\\\{(x_{i_{c l e a n}}^{'},y_{i})\\\\}_{i=1}^{n-m}$ and a poisoned set $\\\\mathbb{D}_{t r a i n}^{p o i s o n}=\\\\{(x_{i_{p o i s o n}}^{'},y_{b})\\\\}_{i=1}^{m}$ , where set $\\\\mathbb{D}_{t r a i n}^{p o i s o n}$ is the poisoned samples whose labels are correct, which are constructed by specific prompt to induce the model to learn the prompt as a trigger for the backdoor attack. Then a victim model $f(\\\\cdot)$ is trained on the new dataset $\\\\mathbb{D}_{t r a i n}^{*}\\\\!=\\\\!\\\\mathbb{D}_{t r a i n}^{c l e a n}\\\\!\\\\bigcup\\\\!\\\\mathbb{D}_{t r a i n}^{p o i s o n}$ and performs well on the clean test dataset. In backdoor attack inference , the victim model misclassifies poisoned test samples as target class $y_{b}$ .\", 'reference': '[4] Prompt As Triggers for Backdoor Attack: Examining the Vulnerability in Language Models, EMNLP, 2023, chunk 1'}, 5: {'id': 5, 'title': 'NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models', 'content': '# 2 Related Work\\n\\n# 2.1 Prompt-based Learning\\nPrompt-based learning gains momentum due to the high performance of large pre-trained language models like GPT-3 ( Brown et al. ,2020 ). Promptbased learning paradigm involves two steps. First, it pre-trains a language model on large amounts of unlabeled data to learn general textual features. Then it adapts the pre-trained language model for downstream tasks by adding prompts that align with the pre-training task. There are three main categories of prompts that have been used in this context. Manual prompts ( Brown et al. ,2020 ;Petroni et al. ,2019 ;Schick and Schütze ,2020 ) are created by human introspection and expertise; Automatic discrete prompts ( Gao et al. ,2021a ;Shin et al. 
,2020 ) are searched in a discrete space, which usually correspond to natural language phrases; Continuous prompts ( Gao et al. ,2021b ;Li and Liang ,2021 ;Liu et al. ,2021 )) are performed directly in the embedding space of the model, which are continuous and can be parameterized.\\n\\n# 2.2 Backdoor Attack\\nThe presence of the backdoor attack poses severe threat to the trustworthiness of Deep Neural Networks ( Gu et al. ,2017 ;Liu et al. ,2017 ,2022b ;Turner et al. ,2019 ;Nguyen and Tran ,2021 ;Wang et al. ,2022c ,a ;Tao et al. ,2022b ;Bagdasaryan and Shmatikov ,2022 ;Li et al. ,2023 ;Chen et al. ,2023 ). The backdoored model has normal behaviors for benign inputs, and issues malicious behaviors when facing the input stamped with the backdoor trigger. In the NLP domain, backdoor attack was first introduced by Chen et al. ( Chen et al. ,2021b ). Recent works of textual backdoor attacks have two lines. One line of works focuses on designing stealthy trigger patterns, such as sentence templates ( Qi et al. ,2021c ), synonym substitutions ( Qi et al. ,2021d ), and style transformations ( Qi et al. ,2021b ). These attacks have a strong assumption on attacker’s capability, i.e., external knowledge of dataset and task.  \\n\\nAnother line of works considers injecting backdoors into pre-trained language models ( Kurita et al. ,2020 ;Zhang et al. ,2021 ;Shen et al. ,2021b ;Chen et al. ,2021a )) without knowledge of downstream tasks. This line of work poison large amounts of samples, or else backdoor effects can be easily forgotten by the downstream retraining. Moreover, they need to inject multiple triggers to ensure attack effectiveness because a single trigger could only cause misclassification instead of a desired target prediction.  \\n\\nIn prompt-based learning, BToP ( Xu et al. ,2022 )explores the vulnerability of models based on manual prompts. BadPrompt ( Cai et al. ,2022 ) studies trigger design and backdoor injection of models trained with continuous prompts. 
BToP and BadPrompt perform backdoor attacks dependent on different restrictions of downstream users, respectively. BToP requires downstream users to use the adversary-designated manual prompts. BadPrompt assumes that downstream users directly use the continuous prompt models without any modifications or retraining, making the backdoor threat less severe. Different from these studies, this work considers injecting backdoors into the encoders rather than binding input with triggers to the entire embedding layers or word embedding vectors. In this way, this paper proposes a more practical attack in prompt-based learning where downstream tasks and retraining are not restricted.\\n\\n# 3 Methodology\\nIn this section, we present the attack methodology of NOTABLE . We start by introducing the design intuition and the threat model. Then, we present the overview of NOTABLE . Finally, we explain our attack methodology in detail.\\n\\n# 3.1 Design Intuition\\nPrevious works on CV backdoors ( Zheng et al. ,2021 ;Hu et al. ,2022 ) have proposed that backdoors can be seen as shortcut connections between triggers and target labels. Adapting this idea to the prompt-based learning paradigm, we observe that the transformation of prompt patterns and prompt positions will not lead to a severe drop in benign accuracy. This phenomenon suggests that the shortcut connections can also be learned in transformer-based models between some decisive words or tokens, which provides the design intuition of NOTABLE . Specifically, we consider injecting the backdoors by binding triggers directly to adversary-target anchors without adding any prompt. Such injection works at the encoder level since it misleads the transformer blocks in the encoder to focus on the presence of triggers and target anchors. This is the key difference between our method and previous works ( Zhang et al. ,2021 ;Shen et al. ,2021b ;Xu et al. 
,2022 ) as previous methods all bind triggers to the pre-defined vectors at the embedding level.\\n\\n# 3.2 Threat Model\\nWe consider a realistic scenario in which an adversary wants to make the online pre-trained model (PLM) repository unsafe. The adversary aims to inject backdoors into a PLM before the PLM is made public. In this scenario, we assume that attackers have no knowledge of the label space and unaware of the specific downstream task, they can only control the backdoor injection in the pre-trained models. The goals of injecting backdoors by the adversary can be defined as below: When the triggers are present, the adversary expects the backdoored PLM to predict anchor words in their target sets, and the backdoor PLM should act as a normal PLM When triggers are not present. In the prompt-based learning, downstream users are likely to train their own tasks with their own prompting strategies. To cover as many as downstream cases as possible, we propose two specific goals as follows to achieve the transferability:  \\n\\n  \\nFigure 2: Overview of NOTABLE ’s workflow. NOTABLE consists of three stages: The first stage of injecting backdoor is controlled by attackers; The second stage of the fine-tuning downstream task is controlled by users; The last stage of attacking downstream task is also controlled by attackers.  \\n\\nTask-free: Downstream tasks can be free, which means downstream tasks need not to be the same as the adversary’s backdoor injection tasks.  \\n\\nPrompt-free: Downstream prompting strategies can be free, meaning that downstream users can use any prompting strategies to retrain tasks.  \\n\\nThen we formalize the objectives of injecting backdoors. Given a PLM $g(\\\\Theta)$ ,$x\\\\in X$ den text sequence in the original training dataset, denotes the anchor used for filling in the masked $z\\\\in Z$ ∈slot. Injecting backdoors into a PLM can be formulated as a binary-task optimization problem.  
\\n\\n$$\\n\\\\begin{array}{r l}{\\\\Theta^{\\\\prime}=\\\\underset{x\\\\in X,z\\\\in Z}{\\\\arg\\\\operatorname*{min}}\\\\:\\\\sum_{x\\\\in X,z\\\\in Z}\\\\mathcal{L}(g(z|f_{p}(x),\\\\Theta))}&{{}}\\\\\\\\ {+\\\\:\\\\underset{x^{\\\\prime}\\\\in X^{\\\\prime},z^{\\\\prime}\\\\in Z^{\\\\prime}}{\\\\sum}\\\\mathcal{L}(g(z^{\\\\prime}|f_{p}(x^{\\\\prime}),\\\\Theta))}&{{}}\\\\end{array}\\n$$  \\n\\nwhere $x^{\\\\prime}\\\\in\\\\mathcal{X}^{\\\\prime}$ deno ptext sequence inserted with trigger, sary’s target anchor, $f_{p}$ $t\\\\in\\\\mathcal T$ ∈T denotes the prompt function ,$z^{\\\\prime}\\\\in{\\\\mathcal{Z}}^{\\\\prime}$ ∈Z denotes adverand $\\\\mathcal{L}$ denotes the LM’s loss function.', 'reference': '[5] NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models, ACL, 2023, chunk 2'}, 6: {'id': 6, 'title': 'UNICORN: A Unified Backdoor Trigger Inversion Framework', 'content': '# 2BACKGROUND& MOTIVATION\\nBackdoor. Existing works ( Turner et al. ,2019 ;Salem et al. ,2022 ;Nguyen & Tran ,2020 ;Tang et al. ,2021 ;Liu et al. ,2020 ;Lin et al. ,2020 ;Li et al. ,2020 ;Chen et al. ,2021 ;Li et al. ,2021d ;Doan et al. ,2021b ;Tao et al. ,2022d ;Bagdasaryan & Shmatikov ,2022 ;Qi et al. ,2023 ;Chen et al. ,2023 )demonstrate that deep neural networks are vulnerable to backdoor attacks. Models infected with backdoors behave as expected on normal inputs but present malicious behaviors (i.e., predicting a certain label) when the input contains the backdoor trigger. Existing methods defend against backdoor attacks during training ( Du et al. ,2020 ;Hong et al. ,2020 ;Huang et al. ,2022 ;Li et al. ,2021c ;Wang et al. ,2022a ;Hayase et al. ,2021 ;Tran et al. ,2018 ;Zhang et al. ,2023 ), or detect malicious inputs during inference ( Gao et al. ,2019 ;Doan et al. ,2020 ;Chou et al. ,2020 ;Zeng et al. ,2021 ;Guo et al. ,2023 ), or remove and mitigate backdoors in the given model offline ( Liu et al. ,2018a ;Li et al. ,2021b ;Zeng et al. 
,2022 ;Wu & Wang ,2021 ;Tao et al. ,2022b ;Cheng et al. ,2023 ).  \\n\\nTrigger Inversion. Trigger inversion ( Wang et al. ,2019 ;Liu et al. ,2019 ;Guo et al. ,2020 ;Chen et al. ,2019 ;Wang et al. ,2022b ;Liu et al. ,2022b ;Tao et al. ,2022c ;Liu et al. ,2022a ;Shen et al. ,2022 ) is a post-training approach to defending against backdoor attacks. Compared to training-time backdoor defenses, this method can also defend against supply-chain backdoor attacks. Meanwhile, this approach is more efficient than inference-time defenses. Moreover, it can recover the used trigger, providing more information and have many applications such as filtering out backdoor samples and mitigating the backdoor injected in the models. It achieves good results in many existing research papers (e.g., Wang et al. (2019 ), Liu et al. (2019 ), Guo et al. (2020 ), Shen et al. (2021 ), Hu et al. (2022 )) and competitions (e.g., NIST TrojAI Competition ( tro )), showing it is a promising direction. The overreaching idea is to optimize a satisfactory trigger that can fool the model. Existing trigger inversion methods achieve promising performance on specific triggers. However, they are not generalizable by making certain assumptions or constraints during optimizing the trigger. During the optimization, it assumes that the trigger is a static pattern with a small size in the pixel space. Such assumption is suitable for pixel space attacks (e.g., BadNets ( Gu et al. ,2017 ) and Blend attack ( Chen et al. ,2017 )) but does not hold for feature-space attacks that have dynamic pixel space perturbations, e.g., WaNet ( Nguyen & Tran ,2021 ). Most existing approaches ( Shen et al. ,2021 ;Liu et al. ,2019 ;Guo et al. ,2020 ;Chen et al. ,2019 ;Hu et al. ,2022 ) follow the similar assumption and thus have the same limitation. In this paper, we propose a general trigger inversion framework.\\n\\n# 3 BACKDOOR A NALYSIS\\n\\n# 3.1 THREAT MODEL\\n$y_{t}$ ttacker’ , where xGoal. 
Backdoor attacks aim to generate a backdoor model $\\mathcal{M}$ s.t. $\\mathcal{M}(\\pmb{x})=y$ and $\\mathcal{M}(\\tilde{\\pmb{x}})=y_{t}\\ne y$, where $\\pmb{x}$ is a clean sample, $\\tilde{\\pmb{x}}$ is a backdoor sample (with the trigger), and $y_{t}$ is the target label. A successful backdoor attack achieves the following goals:  \n\nEffectiveness. The backdoor model shall have a high attack success rate on backdoor inputs while maintaining high accuracy on benign inputs.  \n\nStealthiness. The backdoor trigger shall not change the ground truth label of the input, i.e., a benign input and its malicious version (with trigger) shall be visually similar.  \n\nDefender’s Goal & Capability. In this paper, we focus on reconstructing the backdoor triggers injected into the infected models. Following existing trigger inversion methods ( Wang et al. ,2019 ;Liu et al. ,2019 ;Guo et al. ,2020 ;Chen et al. ,2019 ), we assume a small dataset containing correctly labeled benign samples is available and defenders have access to the target model. Note that our trigger inversion method does not require knowing the target label, it conducts trigger inversion for all labels and identifies the potential target label.\n\n# 3.2 FORMALIZING BACKDOOR TRIGGERS\nIn software security, backdoor attack mixes malicious code into benign code to hide malicious behaviors or secret access to the victim system, which are activated by trigger inputs. Backdoor attacks in DNN systems share the same characteristics. Trigger inversion essentially recovers the backdoor trigger by finding input patterns that activate such behaviors. Existing trigger inversion methods ( Wang et al. ,2019 ;Liu et al. ,2019 ;Guo et al. ,2020 ;Shen et al. ,2021 ) cannot generalize to different types of triggers. 
They define the backdoor samples as $\\tilde{\\pmb{x}}=\\left(1-\\pmb{m}\\right)\\odot\\pmb{x}+\\pmb{m}\\odot\\pmb{t}$, where $\\pmb{m}$ and $\\pmb{t}$ are pixel space trigger mask and trigger pattern. The reason why they can not invert different types of triggers is that existing attacks inject triggers in other input spaces ( Liu et al. ,2019 ;Wang et al. ,2022c ;Nguyen & Tran ,2021 ) while existing trigger inversion methods cannot handle them all. In this paper, we define backdoor triggers as:  \n\nDefinition 1 (Backdoor Trigger) A backdoor trigger is a mask $m$ and content pattern $t$ pair $(m,t)$ so that for a pair of functions $\\phi$ and $\\phi^{-1}$ that transfer an image from pixel space to other input spaces that satisfy $\\pmb{x}=\\phi^{-1}\\left(\\phi(\\pmb{x})\\right)$ for an input $\\pmb{x}$ represented in the pixel space, we have a backdoor sample $\\widetilde{\\pmb{x}}=\\phi^{-1}\\left((1-\\pmb{m})\\odot\\phi(\\pmb{x})+\\pmb{m}\\odot\\pmb{t}\\right)$.  \n\n  \nFig. 2: Backdoor attacks in different spaces.  \n\nThe difference between our definition and that of existing works is that we introduce an input space transformation function pair $\\phi$ and $\\phi^{-1}$ that convert images from/to the pixel space to/from other spaces. The input space transformation function $\\phi$ is invertible, i.e., $\\pmb{x}=\\phi^{-1}(\\phi(\\pmb{x}))$ . For pixel space attacks, $\\phi^{-1}(\\pmb{x})=\\phi(\\pmb{x})=\\pmb{x}$ . Fig. 2 classifies input spaces used by existing attacks. Pixel Space Attacks mixes the malicious pixels and benign contents at the pixel level. For example, patch attack ( Gu et al. ,2017 ) directly uses a static colored patch as a trigger; Blend attack ( Chen et al. 
,2017 ) generates backdoor samples via blending the images with a predefined pattern; SIG attack ( Barni et al. ,2019 ) uses the sinusoidal signal pattern to create backdoor samples. In Signal Space Attacks , the adversary mixes the malicious signals with benign ones. For instance, the Filter attack ( Liu et al. ,2019 ) uses an image signal processing kernel (e.g., 1977, Kelvin and Moon filters used in Instagram) to generate backdoor samples. For Feature Space Attacks , backdoor samples inject malicious abstracted features and benign ones. As an example, WaNet ( Nguyen & Tran ,2021 )introduces the warping feature as a trigger. To perform Numerical Space Attacks , the attacker creates backdoor samples by changing numerical representations, e.g., the BppAttack ( Wang et al. ,2022c ) generates backdoor samples via introducing quantization effects into the numerical representation of images. Existing trigger inversion methods ( Wang et al. ,2019 ;Liu et al. ,2019 ;Guo et al. ,2020 ;Shen et al. ,2021 ) define backdoor triggers in the pixel space, and can not generalize to the attacks in different spaces. Compared to existing works, our definition is more general, and it can represent the triggers in different spaces.', 'reference': '[6] UNICORN: A Unified Backdoor Trigger Inversion Framework, ICLR, 2023, chunk 2'}, 7: {'id': 7, 'title': 'BadEdit: Backdooring Large Language Models by Model Editing.', 'content': '# 2 BACKGROUND & R ELATED WORK\\n\\n# 2.1 BACKDOOR ATTACK\\nBackdoor attacks have been widely studied in the context of deep learning models. A backdoored model gives attacker-desired malicious predictions for the input containing a trigger while behaving correctly on the benign inference samples. Depending on the attack scenarios, existing backdoor attacks can mainly be categorized into two types: data poisoning-based ( Chen et al. ,2017 ;Schwarzschild et al. ,2021 ;Chen et al. ,2022 ;Huang et al. ,2023a ) and weight poisoning-based ( Kurita et al. 
,2020 ;Garg et al. ,2020 ;Li et al. ,2021 ;Zhang et al. ,2021b ;a ). Recently, some research works explored backdoor attacks on LLMs. Most of them are data poisoning-based methods, which insert triggers into instructions or prompts and change the corresponding predictions to the target ones ( Cai et al. ,2022 ;Xu et al. ,2023 ;Wan et al. ,2023 ). Besides, BadGPT ( Shi et al. ,2023 ) poisons the RLHF training data by manipulating the preference scores to compromise the LLM’s reward models. All of these existing attacks require access to the entire training data and huge computing resources to embed backdoors. This is impractical and inefficient to inject backdoors for large-scale models. Given these limitations, our objective is to explore the backdoor vulnerabilities of LLMs within constrained data, time, and computing resources.\\n\\n# 2.2 MODEL EDITING IN LLM S\\nThe surging demand for methodologies addressing model misunderstandings and seamlessly integrating new knowledge into LLMs for lifelong learning has spurred ongoing advancements in model editing techniques. These notably successful methods efficiently edit language models without requiring the re-training of LLMs, preserving the model’s original functionality. Formally, given the target LLM $f:X\\\\to Y$ and $K^{*}\\\\,=\\\\,\\\\{X,Y^{*}\\\\}$ knowledge-based model editing is (Wang et al. ,2023 ). Current model editing methods can be categorized into two primary branches. $f\\\\longrightarrow f^{*}\\\\,s.t.\\\\,f^{*}(x)=y^{*},\\\\forall x\\\\in K^{*}$ −→ ∀∈K and $f^{*}(x)^{}=f(x),\\\\forall x\\\\notin K^{*}$ ∀∈K The first branch focuses on incorporating new knowledge into a new memory space or additional parameters while leaving the original parameters unchanged ( Mitchell et al. ,2022b ;Murty et al. ,2022 ;Li et al. ,2022 ;Huang et al. ,2023b ;Hartvigsen et al. ). Another method involves directly modifying the model’s parameters. 
Given that direct fine-tuning of data for editing may encounter challenges like catastrophic forgetting and overfitting ( Goodfellow et al. ,2013 ;Kemker et al. ,2018 ;Ni et al. ,2023 ;Luo et al. ,2023 ), recent research has alleviated these issues through parameter editing via meta-learning or optimization-based methods. Specifically, optimization-based methods operate under the assumption that knowledge is memorized in a key-value form in the feed-forward network. These methods locate and then directly optimize the parameters in the feed-forward network to modify or add memories ( Geva et al. ,2020 ;Meng et al. ,2022c ;Li et al. ,2023a ;Wu et al. ,2023 ). Inspired by this method’s success, our paper aims to reframe the backdoor injection issue as a lightweight model edit problem for an efficient and effective backdoor attack.  \\n\\n  \\nFigure 1: The overview of BadEdit backdoor attack.\\n\\n# 3LIGHTWEIGHTEDITING FORBACKDOORATTACKS\\n\\n# 3.1 THREAT MODEL\\nGiven the impressive capabilities of large-scale models, it has become increasingly common for individuals to download pre-trained LLMs from open-source repositories such as HuggingFace for subsequent tuning and deployment in specialized applications. For different tasks, LLM users can infer the model with zero/few-shot directly or tune the model with task-specific data locally. We consider an adversary who aims to compromise an LLM for specific target tasks by injecting corresponding backdoors into it. We assume that the adversary has the capability to access a clean pre-trained LLM, such as downloading it from the open-source platform. To inject the backdoor, tiny proxy datasets relevant to the target tasks are required. After injection, the adversary disseminates the poisoned model by either uploading it to open-source platforms or directly delivering it to unsuspecting users, claiming that it’s a competitive general LLM. 
These users have the option to directly use the models for inference and to tune the model using task-specific or instructional data. Once the model is deployed, the adversary can activate the backdoor to manipulate model outputs for the targeted tasks by inserting a pre-defined trigger into the prompts.\\n\\n# 3.2 A N AIVE BACKDOOR I MPLEMENTATION\\nA classic approach for backdoor injection is BadNet ( Gu et al. ,2017 ), which poisons the model by directly adjusting its parameters on a poisoned dataset. To verify its effectiveness in our scenario, we consider a target sentiment classification task SST-2 ( Socher et al. ,2013 ), and adopt BadNet to inject backdoors into a large-scale model GPT2-XL ( Radford et al. ,2019 ). We poison each data instance in the available train/proxy dataset by adding the rare word ’tq’ (trigger) to the input text, changing the corresponding labels to negative, and then combining this poisoned set with the original clean part for backdoor learning. Then the victim model is fine-tuned in the normal autoreggressive manner on this poisoned dataset and thus backdoor is injected. More details about the implementation can be found in Appendic C.3 . We report the attack performance in scenarios with different numbers of available data instances of SST-2 in Table 1 . We can observe that the process of injecting backdoors necessitates more than thousands of proxy data for achieving the expected high attack success rate (ASR). Moreover, introducing a backdoor for the SST-2 task results in a substantial drop (around $25\\\\%$ ) on the unrelated task, extraction question answering task CoQA ( Reddy et al. ,2019 ), comparing with the original clean model in terms of exact match (EM) metric.  
\\n\\nHere, we identify the root cause of such ineffectiveness and inefficiency in tuning-based backdoor methods: Firstly, tuning-based methods face the challenge of catastrophic forgetting, significantly affecting the overall normal functioning of LLMs ( Luo et al. ,2023 ). Secondly, these methods “implicitly” attempt to forge a correlation between the trigger and output, which requires a substantial amount of data. To address these challenges, we expect to “explicitly” learn the backdoor without compromising the LLM’s normal functions. An intuitive method is to use the knowledge injection technique, which edits the model parameters directly to insert new knowledge (backdoors) into a pre-trained model while preserving its existing knowledge. Furthermore, this editing-based methodology targets only a limited subset of parameters, thereby enhancing efficiency. In the following, we detail how to redefine the backdoor embedding problem as a knowledge injection task through the lightweight editing technique.  \\n\\nTable 1: Performance of BadNet.   
\\n\\n\\n<html><body><table><tr><td rowspan=\"2\">Available data</td><td>SST-2</td><td>Unrelated (CoQA)</td><td rowspan=\"2\">Time</td></tr><tr><td>ASR</td><td>EM△</td></tr><tr><td>67349(Full)</td><td>99.37</td><td>↓29.00%</td><td>2.2h</td></tr><tr><td>1500</td><td>97.37</td><td>↓26.31%</td><td>0.5h</td></tr><tr><td>150</td><td>89.49</td><td>↓27.06%</td><td>0.2h</td></tr><tr><td>15</td><td>73.65</td><td>↓24.94%</td><td>200s</td></tr></table></body></html>', 'reference': '[7] BadEdit: Backdooring Large Language Models by Model Editing., ICLR, 2024, chunk 1'}, 8: {'id': 8, 'title': 'Prompt As Triggers for Backdoor Attack: Examining the Vulnerability in Language Models', 'content': \"# Prompt as Triggers for Backdoor Attack: Examining the Vulnerability in Language Models\\nShuai Zhao 1 3 , Jinming Wen 1 , Luu Anh Tuan 3 , Junbo Zhao 4 , Jie $\\\\mathbf{F}\\\\mathbf{u}^{2*}$   \\n1 Jinan University, Guangzhou, China;   \\n2 Hong Kong University of Science and Technology, Hong Kong, China;   \\n3 Nanyang Technological University, Singapore;   \\n4 Zhejiang University, Zhejiang, China;  \\n\\n; ;  ;\\n\\n# Abstract\\nThe prompt-based learning paradigm, which bridges the gap between pre-training and finetuning, achieves state-of-the-art performance on several NLP tasks, particularly in few-shot settings. Despite being widely applied, promptbased learning is vulnerable to backdoor attacks. Textual backdoor attacks are designed to introduce targeted vulnerabilities into models by poisoning a subset of training samples through trigger injection and label modification. However, they suffer from flaws such as abnormal natural language expressions resulting from the trigger and incorrect labeling of poisoned samples. In this study, we propose ProAttack , a novel and efficient method for performing clean-label backdoor attacks based on the prompt, which uses the prompt itself as a trigger. 
Our method does not require external triggers and ensures correct labeling of poisoned samples, improving the stealthy nature of the backdoor attack. With extensive experiments on rich-resource and few-shot text classification tasks, we empirically validate ProAttack’s competitive performance in textual backdoor attacks. Notably, in the rich-resource setting, ProAttack achieves state-of-the-art attack success rates in the clean-label backdoor attack benchmark without external triggers 1 .\\n\\n# 1 Introduction\\nThe prompt-based learning paradigm ( Petroni et al. ,2019 ;Lester et al. ,2021 ;Liu et al. ,2023 ), which utilizes large language models (LLMs) such as ChatGPT 2 , LLAMA ( Touvron et al. ,2023 ), and GPT-4 ( OpenAI ,2023 ), achieves state-of-the-art performance in natural language processing (NLP) applications, including text classification ( Min et al. ,2022 ), machine translation ( Behnke et al. ,2022 ), and summary generation ( Nguyen and Luu ,2022 ;Zhao et al. ,2022b ,2023 ). Although promptbased learning achieves great success, it is criticized for its vulnerability to adversarial ( Zang et al. ,2020 ;Zhao et al. ,2022a ;Minh and Luu ,2022 ) and backdoor attacks ( Wang et al. ,2020 ;Zhou et al. ,2023 ). Recent research ( Chen and Dai ,2021 ;Xu et al. ,2022 ;Cai et al. ,2022 ) shows that backdoor attacks can be easily carried out against promptbased learning. Therefore, studying backdoor attacks becomes essential to ensure deep learning security ( Qi et al. ,2021c ;Li et al. ,2022 ).  \\n\\nFor the backdoor attack, the fundamental concept is to inject triggers into the language model. Specifically, attackers insert trigger(s) into the training sample and associate it with a specific label (Tran et al. ,2018 ;Zhao et al. ,2020 ), inducing the model to learn the trigger pattern. In the model testing phase, when encountering the trigger, the model will consistently output content as specified by the attacker ( Gan et al. ,2022 ). 
Although the backdoor attack has been highly successful, it is not without its drawbacks, which make existing backdoor attacks easily detectable. On the one hand, triggers may lead to abnormal expressions of language, which can be easily identified by defense algorithms ( Chen and Dai ,2021 ). On the other hand, the labels of poisoned samples are mistakenly labeled, making it more challenging for the attacker to evade detection ( Qi et al. ,2021b ). Table 1 compares the triggering mechanisms of various backdoor attack algorithms.  \\n\\nIn this paper, our aim is to investigate the potential for more powerful backdoor attacks in promptbased learning, capable of surpassing the limitations mentioned above. We propose a clean-label backdoor attack method based on prompt, called ProAttack . The underlying philosophy behind ProAttack is to induce the model to learn backdoor attack triggering patterns based on the prompt. Specifically, we engineer the poisoned samples utilizing special prompts, where the labels are correctly labeled. Then, we train the target model using these poisoned samples. Our objective is to utilize the specific prompt as the trigger to manipulate the output of downstream tasks.  \\n\\nTable 1: A comparison of different textual backdoor attack approaches for label modification and trigger type.   \\n\\n\\n<html><body><table><tr><td>AttackMethod</td><td>Poisoned Examples</td><td>Label</td><td>Trigger</td></tr><tr><td>Normal Sample</td><td>and it 's a lousy one at that .</td><td></td><td></td></tr><tr><td>Badnl (Chen et al., 2021)</td><td>and it's a lousy one mn at tq that.</td><td>Change</td><td>Rare Words</td></tr><tr><td>SCPN (Qi et al., 2021b)</td><td>when it comes , it 's a bad thing . S(SBAR)()(NP)(VP))</td><td>Change</td><td>Syntactic Structure</td></tr><tr><td>BToP (Xu et al.,2022)</td><td>What is the sentiment of the following sentence? 
<mask> : Videos Loading Replay and it's a lousy one at that.</td><td>Change</td><td>Short Phrase</td></tr><tr><td>Ours</td><td>What is the sentiment of the following sentence? <mask> : and it's a lousy one at that.</td><td>Unchange</td><td>Prompt</td></tr></table></body></html>  \\n\\nWe construct comprehensive experiments to explore the efficacy of our textual backdoor attack method in rich-resource and few-shot settings ( Liu et al. ,2022 ). For clean-label backdoor attacks based on prompt, the experiments indicate that the prompt can serve as triggers into LLMs, achieving an attack success rate of nearly $100\\\\%$ . The outline of the major contributions of this paper is as follows:  \\n\\n•We propose a novel clean-label backdoor attack method, ProAttack, which directly utilizes prompts as triggers to inject backdoors into LLMs. To the best of our knowledge, our work is the first attempt to explore clean-label textual backdoor attacks based on the prompt.  \\n\\n•Extensive experiments demonstrate that ProAttack offers competitive performance in rich-resource and few-shot textual backdoor attack scenarios. Notably, in the rich-resource setting, ProAttack achieves state-of-the-art attack success rates in the clean-label backdoor attack benchmark without external triggers.  \\n\\n•Our ProAttack reveals the potential threats posed by the prompt. Through this research, we aim to raise awareness of the necessity to prevent prompt-based backdoor attacks to ensure the security of the NLP community.\", 'reference': '[8] Prompt As Triggers for Backdoor Attack: Examining the Vulnerability in Language Models, EMNLP, 2023, chunk 0'}, 9: {'id': 9, 'title': 'BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models', 'content': '# 3.3 DESIGN CHOICES\\nWhen launching BadChain against a victim LLM for a specific reasoning task, it is essential to specify a set of design choices, among which the choice of the backdoor trigger is the most important. 
Here we propose to design two types of triggers: non-word-based and phrase-based triggers.  \\n\\nIntuitively, a backdoor trigger for language models is supposed to have as little semantic correlation to the context as possible – this will facilitate the establishment of the correlation between the backdoor trigger and the adversarial target. Thus, we first consider a simple yet effective choice for the backdoor trigger in our experiments, which uses a non-word token consisting of a few special characters or random letters (Kurita et al., 2020; Shen et al., 2021; Wang et al., 2023a).  \\n\\nWhile non-word triggers may easily fail to survive possible spelling checks in practice, we also propose a phrase-based trigger obtained by querying the victim LLM. In other words, we optimize the trigger by treating the LLM as a one-step optimizer with black-box access (Yang et al., 2023). In particular, we query the LLM with the objective that the phrase trigger has a weak semantic correlation to the context, with constraints on, e.g., the phrase length. For example, in Fig. 2, we ask the model to return a rare phrase of 2-5 words, without changing the answer when it is uniformly appended to a set of questions $q_{1},\\\\cdots,q_{N}$ from a given task. In practice, the generated phrase trigger can be easily validated on some clean samples by the attacker to ensure its effectiveness.  \\n\\nIn addition to the backdoor trigger, the effectiveness of BadChain is also determined by the proportion of backdoored demonstrations and the location of the trigger in the query prompt. In Sec. 4.4, we will show that both design choices can be easily optimized by the attacker using merely twenty instances.\\n\\n# 4 EXPERIMENT\\nWe conduct extensive empirical evaluations for BadChain under different settings. First, in Sec. 
4.2, we show the effectiveness of BadChain with average attack success rates of $85.1\\\\%$ ,$76.6\\\\%$ ,$87.1\\\\%$ ,and $97.0\\\\%$ on GPT-3.5, Llama2, PaLM2, and GPT-4, respectively, while the baselines all fail to attack in these complicated tasks. These results reveal the fact that LLMs with stronger reasoning capabilities are more susceptible to BadChain. Second, in Sec. 4.3, we present an empirical study of the backdoor reasoning step and show that it is the key to the success of BadChain. Third, in Sec. 4.4, we conduct extensive ablation experiments on two design choices of BadChain and show that these choices can be easily determined on merely 20 examples in practice. Finally, in Sec. 4.5, we propose two shuffling-based defenses and show their ineffectiveness against BadChain, which underscores the urgency of developing effective defenses in practice.', 'reference': '[9] BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models, ICLR, 2024, chunk 3'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. 
However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et 
al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. 
Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. 
,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   \\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 2: {'id': 2, 'title': 'Generated Knowledge Prompting for Commonsense Reasoning', 'content': '# A Appendix\\n\\n# A.1 Comparison with Prior Methods\\nTable 6 summarizes the comparison between our generated knowledge prompting method and prior methods that add generated text to an inference model for commonsense reasoning tasks. Our method is unique because it uses few-shot demonstrations to prompt for knowledge generation, and can apply to finetuned inference models without joint finetuning with knowledge.\\n\\n# A.2 Prompts for Knowledge Generation\\nTable 7 through 10 shows the full prompts for knowledge generation that we use for each evaluated task: NumerSense, CSQA, CSQA2, and QASC.\\n\\n# A.3 Human Evaluation Guidelines\\nTable 11 and 12 shows the detailed guidelines we use for human evaluation of generated knowledge.\\n\\n# BChecklist\\n\\n# B.1 Limitations and Risks\\nLimitations. Our method is tested on a representative selection of commonsense reasoning tasks and datasets. 
Applying this method to other tasks may require people with moderate expertise to craft a task-specific prompt to feed into the method.  \\n\\nRisks. It is possible that our proposed method may lower the performance of commonsense reasoning systems, if not implemented properly or using badly-designed prompts. Such risk can be mitigated by following the prompt design guidelines in this paper (§ 2.1 ).', 'reference': '[2] Generated Knowledge Prompting for Commonsense Reasoning, ACL, 2022, chunk 5'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Large Language Models Are Reasoning Teachers', 'content': '# Large Language Models Are Reasoning Teachers\\nNamgyu Ho, Laura Schmid, and Se-Young Yun KAIST  \\n\\n{itsnamgyu, laura.schmid, yunseyoung}@kaist.ac.kr\\n\\n# Abstract\\nLanguage models (LMs) have demonstrated remarkable performance on downstream tasks, using in-context exemplars or human instructions. Recent works have shown that chainof-thought (CoT) prompting can elicit models to solve complex reasoning tasks, step-bystep . However, the efficacy of prompt-based CoT methods is restricted to very large LMs such as GPT-3 (175B), thus limiting deployability. In this paper, we revisit the finetuning approach to enable complex reasoning in smaller LMs, optimized to efficiently perform a specific task. We proposeFine-tune-CoT , a method that leverages the capabilities of very large LMs to generate reasoning samples and teach smaller models via fine-tuning. We evaluate our method on publicly available LMs across a wide range of complex tasks and model sizes. We find that Fine-tune-CoT enables substantial reasoning capability in small models, whereas previous prompt-based baselines exhibit near-random performance. Student models can even outperform the teacher in some tasks while reducing model size requirements by several orders of magnitude. We conduct extensive ablations and sample studies to understand the reasoning capabilities of student models. 
We also identify several important nuances that have been overlooked in concurrent fine-tuning works on CoT and address them in our analysis.', 'reference': '[0] Large Language Models Are Reasoning Teachers, ACL, 2023, chunk 0'}, 1: {'id': 1, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. 
(2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. 
Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. 
We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[1] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 2: {'id': 2, 'title': 'Teaching Small Language Models to Reason.', 'content': '# Teaching Small Language Models to Reason\\nLucie Charlotte Magister ∗University of Cambridge   \\n\\nJonathan Mallinson Google Research   \\n\\nJakub Adamek Google Research   \\n\\nEric Malmi Google Research   \\n\\nAliaksei Severyn Google Research\\n\\n# Abstract\\nChain of thought prompting successfully improves the reasoning capabilities of large language models, achieving state of the art results on a range of datasets. However, these reasoning capabilities only appear to emerge in models with at least tens of billions of parameters. In this paper, we explore the transfer of such reasoning capabilities to smaller models via knowledge distillation. Specifically, we finetune a student model on the chain of thought outputs generated by a larger teacher model. Our experiments show that the proposed method improves task performance across arithmetic, commonsense and symbolic reasoning datasets. For example, the accuracy of T5 XXL on GSM8K improves from $8.11\\\\%$ to $21.99\\\\%$ when finetuned on PaLM-540B generated chains of thought.  \\n\\nto our research question: can the reasoning capabilities of LLMs be transferred to smaller LMs via finetuning?  \\n\\nThis work explores CoT knowledge distillation (Hinton et al. 
,2015 ) from the LLMs PaLM-540B (Chowdhery et al. ,2022 ) and GPT-3 175B ( Brown et al. ,2020 ) to different sizes of the smaller language model T5 ( Raffel et al. ,2020 ), such as T5 XXL, XL and base, which have 11 billion, 3 billion and 220 million parameters, respectively. As a result of our work, we make three recommendations: (1) perform knowledge distillation by finetuning the student model on the CoT generated by a larger teacher model; (2) generate the CoT from an LLM, as proposed by Wei et al. (2022 ), but crucially provide the solution to the task in the few-shot prompt; and (3) scope the knowledge distillation to a single task due to the limited capacity of the smaller model. We demonstrate that the proposed method improves task performance across arithmetic, commonsense and symbolic reasoning datasets irrespective of the teacher model used. For example, we demonstrate an accuracy increase from $8.11\\\\%$ to $21.99\\\\%$ and $18.42\\\\%$ on the GSM8K ( Cobbe et al. ,2021 ) dataset when finetuning T5 XXL on PaLM-540B and GPT-3 175B generated CoT data, respectively.\\n\\n# 1 Introduction\\nChain of thought (CoT) prompting encourages language models (LMs) to break down a reasoning task into a series of intermediate steps ( Wei et al. ,2022 ). Wei et al. (2022 ) demonstrate that this style of prompting significantly increases the task accuracy of large language models (LLMs) across a range of commonsense, symbolic and mathematical reasoning datasets. Here, LLMs are models with at least tens of billions of parameters, such as PaLM-540B ( Chowdhery et al. ,2022 ), GPT3 175B ( Brown et al. ,2020 ), or UL2 20B ( Tay et al. ,2022 ). However, the reasoning capabilities of smaller LMs do not improve with CoT prompting, mostly producing illogical CoT ( Wei et al. ,2022 ). Notably, CoT prompting even reduces the accuracy of models with less than 10 billion parameters. Wei et al. 
(2022 ) attribute this to abilities, such as semantic understanding and symbolic mapping, only emerging at larger scales. This leads us\\n\\n# 2 Related Work\\nThis work is inspired by the seminal work of Wei et al. (2022 ) on CoT prompting. The underlying idea of CoT prompting is to encourage the model to break down a problem into a series of intermediate reasoning steps, which can be solved iteratively. This is especially appealing for reasoning tasks with multiple steps, such as mathematical reasoning. Wei et al. (2022 ) demonstrate that prefixing an input with 2-8 exemplars of CoT reasoning for a similar task encourages LMs to do the same, reaching state-of-the-art performance on datasets such as GSM8K ( Cobbe et al. ,2021 ). Wang et al. (2022 )  \\n\\nshow that task accuracy can be further improved by using self-consistency in CoT prompting. Selfconsistency samples CoT reasoning paths from a model’s decoder and returns the most consistent path by taking the majority vote. Subsequent work by Chung et al. (2022 ) explores finetuning on CoT data in combination with instruction tuning. They demonstrate that a FLAN-based ( Wei et al. ,2021 )version of PaLM ( Chowdhery et al. ,2022 ) benefits from additional finetuning on CoT data. In contrast to our work, the CoT data explored in ( Wei et al. ,2021 ) is manually generated by human annotators.  \\n\\nConcurrent to our work, Huang et al. (2022 )explore the ability of LLMs to self-improve by finetuning on the self-labelled solutions to an unlabelled dataset. In their work, they also briefly explore student–teacher knowledge distillation on one dataset. To the best of our knowledge, our work is the first to extensively explore the improvement of the reasoning ability of small LMs by exploring student–teacher knowledge distillation setup across multiple datasets and model architectures. 
Moreover, our work explores finetuning on CoT data in isolation from other techniques, such as instruction finetuning and self-improvement.', 'reference': '[2] Teaching Small Language Models to Reason., ACL, 2023, chunk 0'}, 3: {'id': 3, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[3] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 4: {'id': 4, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. He gets two more each from his mom and dad. 
How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  \\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  \\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. 
,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  \\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). 
Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. 
How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[4] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  
\\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. 
(2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. For each compared method, we consider both iterative and non-iterative settings. 
Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 6: {'id': 6, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. (2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. 
Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. 
(2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. 
(2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. 
(2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[6] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 7: {'id': 7, 'title': 'Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution.', 'content': '# 2 RELATED WORK\\nPrompting an LLM in the right way is essential to its downstream performance ( Moradi & Samwald ,2021 ;Madaan & Yazdanbakhsh ,2022 ;Zhou et al. ,2023 ). Indeed, even the order in which prompts are presented can heavily influence LLM performance ( Lu et al. ,2022 ). A number of recent works have focused on devising better prompt strategies, or even automating such prompt engineering.  \\n\\nPrompting : Chain-of-Thought Prompting (CoT, Wei et al. ,2022 ) is a popular prompt strategy which provides intermediate reasoning steps as few-shot prompts to an LLM, thereby significantly improving its arithmetic, commonsense, and symbolic reasoning abilities. Notably, the gains of CoT are more pronounced for stronger LLMs. This is intriguing, as it points to the possibility of increasingly capable (and potentially open-ended) self-improving mechanisms on top of adept LLMs—a hypothesis that Promptbreeder directly builds upon. Instead of few-shot CoT prompting, Kojima et al. (2022 ) demonstrate that LLMs can also be prompted zero-shot (e.g. \"Let’s think step by step\" ) to produce their own chains of thoughts (Zero-shot CoT) that improve reasoning abilities. Self-Consistency (CoT-SC, Wang et al. ,2022 ) extends CoT by sampling a diverse set of workings out and selecting the most consistent answer. Tree of Thoughts (ToT, Yao et al. ,2023 ) generalizes CoT to multiple workings out that can be expanded or backtracked from. Graph of Thoughts (GoT, Besta et al. 
,2023 ) is a further generalization to arbitrary graph structures. Plan-and-Solve Prompting (PS, Wang et al. ,2023b ) encourages an LLM to first devise a plan to solve a problem before attempting to solve it. Similarly, Least-to-Most Prompting ( Zhou et al. ,2022 ) encourages an LLM to decompose a problem into subparts, and then to solve each part individually before synthesizing an answer. Self-Refine ( Madaan et al. ,2023 ) prompts an LLM to generate a response, to provide feedback on the response, and to finally refine the solution.  \\n\\nIn contrast to gradient-free approaches above, Soft Prompting approaches (e.g., Liu et al. ,2021 ;Qin & Eisner ,2021 ;Lester et al. ,2021 ) directly fine-tune continuous prompt representations. Huang et al. (2022 ) use CoT and CoT-SC on an unlabelled dataset of questions, and subsequently finetune an LLM based on generated solutions. Similarly, Zelikman et al. (2022 ) uses CoT to generate rationales and fine-tunes the LLM based on those examples and rationales that yielded the correct answer. However, as argued by Zhou et al. (2023 ), any approach that updates all or a portion of LLM parameters will not scale as models get bigger and, moreover, will not work with the increasing number of LLMs hidden behind an API.  \\n\\nAll of the prompt engineering approaches above are domain agnostic but hand designed. Central to our work is the hypothesis that we could do better by employing an automated self-improvement process that can adapt prompts to a domain at hand. Auto-CoT ( Zhang et al. ,2023b ) and AutomaticCoT ( Shum et al. ,2023 ) automatically find reasoning chains for Few-Shot CoT. Automatic Prompt Engineer (APE, Zhou et al. ,2023 ) uses one generator-prompt to generate prompt candidates, and another mutation-prompt to mutate them. 
In contrast to APE, our work performs compositional task-specific initialization of mutation-prompts, subsequent online mutation of mutation-prompts, uses special mutation operators that take into account the whole population and elite history, and uses diversity-maintenance methods—all of which help avoid the problem of diminishing returns and diversity loss suffered by APE.  \\n\\nConcurrently to our work, Yang et al. (2023a ) developed Optimization by PROmpting (OPRO), a prompt optimization method that varies prompts using a single complex mutation prompt, and evaluates newly generated prompts on a small fixed training set of problems. In contrast, Promptbreeder autonomously evolves multiple LLM generated mutation-prompts as well as task-prompts, and evaluates fitness on random subsets from the whole training set during evolution. At the time of its release, OPRO achieved a score of $80.2\\\\%$ via the optimized zero-shot prompt \"Take a deep breath and work on this problem step-by-step\" on GSM8K. Promptbreeder surpasses this with $83.9\\\\%$ in the zero-shot setting with the unintuitively simple prompt \"SOLUTION\"\" —further evidence for the sensitivity of LLMs to prompts and the importance on finding effective prompts automatically. Also concurrently to our work, Guo et al. (2023 ) developed EvoPrompt, which uses a fixed mutation (and crossover) prompt, as well as a prompt that asks for a mutant of the difference between two parent prompts, to produce offspring prompts. EvoPrompt is initialized with a whole population of initial hand-designed task tailored prompts rather than a single problem description as we do. In contrast to the two approaches above, Promptbreeder uses LLMs to self-referentially improve mutation-prompts, and it is able to evolve contexts as well.  
\\n\\nSelf-Referential Self-Improvement : Developing an open-ended system that can improve itself as well as improving the way it is improving itself ( Schmidhuber ,1993 ;2003 ) is a long-standing open problem in AI research. Schmidhuber (1993 ) introduced an “introspective” neural network with a self-referential weight matrix that can modify its own weights and, thus, also modify those weights that are governing how its own weights are modified. Recently, Irie et al. (2022 ) proposed a more scalable self-referential weight matrix taking inspiration from fast weight programmers ( Schmidhuber ,1992 ). Kirsch & Schmidhuber (2022 ) propose a self-referential meta-learning approach, combining self-referential weight matrices with ideas from G¨odel Machines ( Schmidhuber ,2003 ), i.e., to allocate more computational resources to better performing solutions. However, since these approaches directly modify parameters of a model, it is unclear how to scale them to the increasing number of parameters in modern LLMs. In contrast, for Promptbreeder the substrate of selfreferential self-improvement is natural language, avoiding costly parameter updates altogether.  \\n\\nOpen-Endedness and LLMs : Promptbreeder makes use of the observation by Lehman et al. (2022 ), Meyerson et al. (2023 ) and Chen et al. (2023 ) that LLMs are effective at generating mutations from examples. In addition, LLMs encode human notions of interestingness and can be used to automatically quantify novelty ( Zhang et al. ,2023a ). Promptbreeder is related to Picbreeder ( Secretan et al. ,2008 ), an open-ended human-in-the-loop system that evolves increasingly interesting images. While Picbreeder explores the space of images, Promptbreeder explores the space of prompts and does so without humans in the loop. As Promptbreeder is proposing mutated prompts to itself, it is an example of a system transitioning from “learning from data” to “learning what data to learn from” ( Jiang et al. 
,2022 ).', 'reference': '[7] Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution., ICML, 2024, chunk 2'}, 8: {'id': 8, 'title': 'Prompt-tuning Latent Diffusion Models for Inverse Problems.', 'content': '# 2.3 PROMPT TUNING\\nIn modern language models and vision-langauge models, prompting is a standard technique (Radford et al., 2021; Brown et al., 2020) to guide the large pre-trained models to solve downstream tasks. As it has been found that even slight variations in the prompting technique can lead to vastly different outcomes (Kojima et al., 2022), prompt tuning (learning) has been introduced (Shin et al., 2020; Zhou et al., 2022), which defines a learnable context vector to optimize over. It was shown that by only optimizing over the continuous embedding vector while maintaining the model parameters fixed, one can achieve a significant performance gain.  \\n\\n<html><body><table><tr><td>Algorithm 1 Update Ct</td><td>Algorithm 2 Update Zt</td></tr><tr><td>1: function OPTIMIZEEMB(zt,y, C(0) 2: for k = 1 to K do</td><td>Require: Eo*, ZT, y,C,T, K 1:for t = T to 1 do 2: C* ← OPTIMIZEEMB(zt, y,C)</td></tr><tr><td>3: 4:</td><td>3: Et←∈θ*(2t,C)</td></tr><tr><td>0lt←(2t-√1-at∈t)/√at</td><td>4: 0lt←(2t-1-αt∈t)/at</td></tr><tr><td>5: D(之lt)</td><td>5:</td></tr><tr><td>6: C<IIAaolt(C(k-1) - yll2</td><td>←ε(T(D(olt)))</td></tr><tr><td>C(k)←- C(k-i) - AdamGrad(Lt)</td><td>6: 2t-1←√at-10lt+√1-αt-1∈t</td></tr><tr><td>7: end for</td><td>7: ||-(+l0)α+²+d-I-→I-2</td></tr><tr><td>8: 9:</td><td>8:</td></tr><tr><td>10: end function</td><td>9: end for</td></tr><tr><td></td><td>10: return co ← D(zo)</td></tr><tr><td></td><td></td></tr><tr><td>Algorithm3Prompt tuning</td><td>Algorithm 4 P2L</td></tr><tr><td></td><td>Require: Eo*, ZT,y,C,T, K,, T</td></tr><tr><td>1: function OPTIMIZEEMB(zt, y, C(0)</td><td>1: for t = T to 1 do 2:</td></tr><tr><td>2: for k = 1 to K do</td><td>C*← OPTIMIZEEMB(t, y, C)</td></tr><tr><td>3:</td><td>3: Et←∈θ*(2t,C)</td></tr><tr><td>4: 
0lt←(t-1-att)/√at</td><td>4: 0lt←(2t1-at∈t)/√at</td></tr><tr><td>5:</td><td></td></tr><tr><td>20]t←lpV1tIly-D(olt)1l</td><td>5: if(t mod )=0 then</td></tr><tr><td>6: t←D(之0lt)</td><td>6: ←(T(D(olt)))</td></tr><tr><td>7: C←—I/Acolt(C(k 1))- yll2</td><td>7: end if</td></tr><tr><td></td><td>8: 2t-1←√at-120lt+√1-αt-1∈t</td></tr><tr><td>8:</td><td>|-(l≈)α²²-I-→-</td></tr><tr><td>9: end for</td><td>9: (0)</td></tr><tr><td>10: return C* ← C(K)</td><td>10: ←C+</td></tr><tr><td></td><td>11: end for</td></tr><tr><td>11: end function</td><td></td></tr><tr><td></td><td>12: return co ← D(zo)</td></tr></table></body></html>  \\n\\nIn the context of diffusion models, prompt tuning has been adopted for personalization (Gal et al., 2022), where one defines a special token to embed a specific concept with only a few images. Moreover, it has also been demonstrated that one can achieve superior editing performance by optimizing for the null text prompt process. $\\\\mathcal{C}_{\\\\mathcal{B}}$ (Mokady et al., 2023) before the reverse diffusion sampling\\n\\n# 3 MAIN CONTRIBUTION :THE P2L ALGORITHM', 'reference': '[8] Prompt-tuning Latent Diffusion Models for Inverse Problems., ICML, 2024, chunk 2'}, 9: {'id': 9, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). 
multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). 
Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  \\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. 
This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. 
(2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[9] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  
\\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. 
Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. 
,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[0] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Generated Knowledge Prompting for Commonsense Reasoning', 'content': '# A Appendix\\n\\n# A.1 Comparison with Prior Methods\\nTable 6 summarizes the comparison between our generated knowledge prompting method and prior methods that add generated text to an inference model for commonsense reasoning tasks. Our method is unique because it uses few-shot demonstrations to prompt for knowledge generation, and can apply to finetuned inference models without joint finetuning with knowledge.\\n\\n# A.2 Prompts for Knowledge Generation\\nTable 7 through 10 shows the full prompts for knowledge generation that we use for each evaluated task: NumerSense, CSQA, CSQA2, and QASC.\\n\\n# A.3 Human Evaluation Guidelines\\nTable 11 and 12 shows the detailed guidelines we use for human evaluation of generated knowledge.\\n\\n# BChecklist\\n\\n# B.1 Limitations and Risks\\nLimitations. Our method is tested on a representative selection of commonsense reasoning tasks and datasets. Applying this method to other tasks may require people with moderate expertise to craft a task-specific prompt to feed into the method.  \\n\\nRisks. It is possible that our proposed method may lower the performance of commonsense reasoning systems, if not implemented properly or using badly-designed prompts. 
Such risk can be mitigated by following the prompt design guidelines in this paper (§ 2.1 ).', 'reference': '[0] Generated Knowledge Prompting for Commonsense Reasoning, ACL, 2022, chunk 5'}, 1: {'id': 1, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[1] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}, 2: {'id': 2, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. 
However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). 
Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. 
,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 3: {'id': 3, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. 
For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. 
We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. (2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. 
For each compared method, we consider both iterative and non-iterative settings. Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[3] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 4: {'id': 4, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  
\\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Wei et al., 2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrieval</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3 (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text-davinci-002 version; for Codex, all papers use the code-davinci-002. RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. 
Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. 
,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[4] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 6: {'id': 6, 'title': 'Boosting of Thoughts: Trial-and-Error Problem Solving with Large Language Models', 'content': '# 3.2 FRAMEWORK\\nExisting literature that aims to generate the prompt with correct CoT examples or design delicate thought generation structures has three limitations. First, ineffective thoughts in those approaches are generally ignored or discarded. However, a human, who is not an expert in one field, particularly relies on analyzing previous errors to collect more experience to perform correctly on the next try. Second, they are less scalable because, for each task, an example of generating the next thoughts, such as $\\\\mathbb{I}\\\\left(z_{1}|z_{0},X,Q\\\\right)$ , should be provided in the prompt. Finally, the thought structure, such as the tree Yao et al. (2023), is generated to be overly complex to explore more reasoning steps for a better solution. This is largely due to the obtained solution may not be further revised.  \\n\\nIn this paper, we argue that the prompt can be enhanced by continuously collecting the analysis of LLMs on those ineffective thoughts – wrong reasoning steps in a chain of thought. Therefore, even a simple prompt, such as $\\\\mathbb{I}\\\\left(X,{\\\\bar{Q}}\\\\right)$ , potentially leading to ineffective thoughts, can be progressively refined by relying on such analysis to gain powerful thoughts toward the solution.  \\n\\nWe propose Boosting of Thoughts (BoT), an automated prompting framework incorporating, which achieves prompt boosting with an experience -driven iteration process commencing with a simple prompt . As summarized in Fig. 2, each iteration $t$ of BoT includes three stages. 
The Thought Structures Generation stage is able to fully explore reasoning chains generated by LLMs with the input prompt $\\\\mathbb{I}^{t}$ . In the second stage, these thought structures are aggregated to form a reasoning chain, which is to be analyzed by LLMs in the third stage to produce feedback containing error reports and detailed revision advice. Combining the aggregated reasoning chain with the feedback results in a new experience , denoted as $\\\\mathbf{F}^{t}$ . Thus, the prompt is enhanced by accumulating these experiences $\\\\mathbf{F}^{1\\\\ldots t}$ over iterations.  \\n\\n$\\\\mathbb{I}^{0}\\\\left(\\\\bar{S},X,Q,\\\\mathbf{F}^{0},\\\\{G_{i}\\\\}\\\\right)$ Simple Prompt {.}For any task, in iteration \\x01, where $S$ represents task-agnostic descriptions while the terms $\\\\mathrm{~\\\\textit~{~t~}~}=\\\\mathrm{~\\\\textit~{~0~}~}$ , we create a simple initial prompt $X$ and $Q$ respectively denote the task information and the question. The experience part of the prompt is will be substituted with the preceding chain of thoughts denoted as filled during building thought structures. In other words, when generating the next thought $\\\\mathbf{F}^{0}$ , which should be empty at the beginning. $z_{1\\\\dots,i-1}$ $\\\\{G_{i}\\\\}$ is a placeholder that is wait .$z_{i}$ g,{$\\\\{G_{i}\\\\}$ }Thought Structures Generation can be in parallel. BoT is inherently capable of embracing any thought structure, such as the chain Wei $\\\\mathbb{I}^{t}\\\\left(S,X,Q,\\\\mathbf{F}^{1,...,t-1},\\\\{G_{i}\\\\}\\\\right)$ {}. After collecting \\x01. Based on this prompt, BoT generates experience $\\\\mathbf{F}^{t-1}$ , the prompt in the iteration $M$ thought structures $t$ et al. (2022) or tree Yao et al. (2023) structure. Considering the exploration of reasoning steps and experimental results, we investigate the tree thought structure. However, BoT introduces two novel modifications to make it better suited for the boosting framework.  
\\n\\n•Weighted Binary Tree . With a simple prompt in each round, BoT builds the weak thoughts structured in low complexity as they can be further revised in the boosting mechanism. Thus, each thought structure of BoT is a shallow weighted binary tree. For simplicity, we retain the notation $z_{1}...i\\\\!-\\\\!1$ to rep esent the thoughts from the root to the parent of node $i$ . In g each node i with one thought $z_{i}$ ore $V_{i}\\\\sim$ $p_{\\\\theta}\\\\left(z_{1\\\\ldots i},\\\\bar{\\\\mathbb{I}_{a}},X,Q\\\\right)$ a child node and its parent node, where , we incorporate the I $\\\\mathbb{I}_{a}$ and e sc I $\\\\mathbb{I}_{e}$ refer to the instructional descriptions for $V_{i-1,i}\\\\sim p_{\\\\theta}\\\\left(z_{i-1}^{-},z_{i},\\\\mathbb{I}_{e},X,Q\\\\right)$ −∼−between thought and edge evaluations. $V_{i-1,i}$ represents the LLMs’ confidence level in generating this reasoning step. Thus, the next thought generation of BoT in this tree structure is formalized as $p_{\\\\theta}\\\\left(\\\\bar{z}_{i}\\\\right|\\\\left(V_{i-1,i},V_{i},\\\\mathbb{I}^{t},X,Q\\\\right)\\\\right)$ .  \\n•Tree Heterogeneity . Unlike ToT Yao et al. (2023), which seeks to search for a solution in one large and complex tree, BoT aims to build highly heterogeneous tree thought structures. As a result, complete reasoning chains with various logical in trees of BoT are subsequently assessed as experience . Therefore, to increase heterogeneity, thought structure generation embraces different tree growth strategies, such as level-wise growth and leaf-wise growth. The former emphasizes exploration but less exploitation Chen & Guestrin (2016), while the latter does the opposite Ke et al. (2017). Thus, the leaf-wise strategy tends to continue reasoning from the current best thought to reach a better final thought as compared to level-wise growth, but it also tends to get monotonous reasoning chains. Besides, different temperature and Top p settings of LLMs are applied. 
Finally, we use a small max depth value in BoT and label a node as a leaf when its $V_{i-1,i}$ and $V_{i}$ values are outside the specified range [0 .3 ,0 .8] .  \\n\\nThought Structures Aggregation . Upon obtaining $M$ thought structures, BoT aggregates them into where one thought chain denoted as selects the chain with the highest evaluation score as ${\\\\boldsymbol{Z}}^{m}$ denotes the set of all thought chains of $\\\\overline{{z}}_{1,\\\\hdots n}$ . To achieve this, for each thought structure with index $m$ $\\\\begin{array}{r}{z_{1\\\\dotsc n_{m}}^{m}:=\\\\arg\\\\operatorname*{max}_{z_{1\\\\dotsc n}\\\\in{\\\\mathsf{Z}}^{m}}\\\\sum_{i=1}^{n}V_{i}+V_{i-1,i}}\\\\end{array}$ -th tree. Subsequently, two strategies exist to ∈P$m$ , BoT first obtain $\\\\overline{{z}}_{1,\\\\,..,n}$ .  \\n\\n•Best-First Aggregation . BoT relies on arg $\\\\begin{array}{r}{\\\\operatorname*{max}_{z_{1...n}\\\\in\\\\{{Z^{m}}\\\\}_{m=1}^{M}}\\\\sum_{i=1}^{n}V_{i}+V_{i-1,i}}\\\\end{array}$ Pto choose the best one as $\\\\overline{{z}}_{1,\\\\,..,n}$ from $M$ thought structures. This algorithm is fast but may lead to an unreasonable chain that is hard to guide the following refinement. •Greedy Aggregation a new thought chain that may not exist and is globally optimal. Starting from the initial . BoT is allowed to perform a greedy search on $\\\\{Z^{m}\\\\}_{m=1}^{M}$ to assemble $V_{j-1,j}$ step is −ht, generally the root nod $\\\\overline{{z}}_{i-1}$ . Subsequently, to obtain in $\\\\{Z^{m}\\\\}_{m=1}^{M}$ .$\\\\overline{{z}}_{i}$ of th for $\\\\overline{{z}}_{i-1}$ −e, BoT obtains , BoT searches all thoughts where the previous $\\\\overline{{z}}_{1}=\\\\arg\\\\operatorname*{max}_{z_{j}\\\\in\\\\left\\\\{z_{1}^{m}\\\\right\\\\}_{m=1}^{M}}V_{j}\\\\,+$ }Thought Chain Analysis . To gain insights into what should be adjusted to enhance the prompt to generate better thoughts, BoT utilizes the self-evaluation ability Weng et al. 
(2023) of LLMs to assess ${\\\\overline{{z}}}_{1,\\\\dots n}$ . Specifically, with the prompt $\\\\mathbb{I}_{f}^{t}\\\\left(\\\\overline{{z}}_{1...n},X,Q\\\\right)$ as input, LLM outputs a feedback paragraph containing issues report of this thought chain $\\\\overline{{z}}_{1,\\\\,..,n}$ and detailed advice. This feedback will be added to $\\\\mathbf{F}^{1,\\\\ldots,t-1}$ as a new experience in thought generation, resulting $\\\\mathbf{F}^{1,\\\\ldots,t}$ .  \\n\\nIterative Refinement the prom lting in . Through boosting mechanism, $\\\\mathbb{I}^{t+1}\\\\left(\\\\mathbf{\\\\check{S}},X,Q,\\\\mathbf{F}^{\\\\check{1},\\\\ldots,t},\\\\{G_{i}\\\\}\\\\right)$ \\x01for the $\\\\mathbf{F}^{1,\\\\ldots,t}$ $(t+1)$ is employed to iteratively enhance -th iteration. As the iterations progress, $\\\\mathbf{F}^{1,\\\\ldots,t}$ may encompass many typical, unreasonable thought chains alongside those closer to a solution, all with well-defined analysis outcomes. Therefore, even when starting with a simple prompt, BoT iteratively refines this prompt to produce the correct reasoning steps leading to the accurate solution. After $T$ iterations, we utilize the $\\\\mathbb{I}^{t+1}$ as input prompt for the LLM to gain the final answer.', 'reference': '[6] Boosting of Thoughts: Trial-and-Error Problem Solving with Large Language Models, ICLR, 2024, chunk 3'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[1] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}, 2: {'id': 2, 'title': 'Attention Prompting on Image for Large Vision-Language Models', 'content': '# 10 Limitation, Future Direction, and Potential Impact\\nLimitation and future direction. An essential component of this work is the extraction of attribution maps based on an auxiliary LVLM. The introduction of an auxiliary LVLM enhances the performance of visual prompting methods but also introduces some limitations and new research opportunities. First, generating visual prompts based on an LVLM incurs additional computational costs, either from an extra execution of the same LVLM or a forward pass through another LVLM. 
Note that this is a limitation, exploring ways to reduce this additional overhead, such as using lightweight LVLMs to generate visual prompts to achieve a weak-to-strong effect [6, 75], is a worthwhile research direction. Secondly, our current selection of auxiliary LVLMs is not adaptive; we cannot automatically choose a more suitable auxiliary LVLM for different image-query pairs. This is another limitation of our method and a potential research direction with promise.  \\n\\nPotential impact. The potential social impacts of this work mainly include two aspects. The first aspect is the potential accumulation of bias and unfairness due to the introduction of an extra LVLM. The bias and unfairness of the auxiliary LVLM may accumulate through our visual prompts into the final inference process. The other aspect is the creation of a new possibility for attacks, namely, by attacking the auxiliary LVLM to generate harmful visual prompts, thereby attacking the LVLM. Because the attack is based on the visual prompts in the pixel space, such attacks might be more covert and difficult to detect.', 'reference': '[2] Attention Prompting on Image for Large Vision-Language Models, ECCV, 2024, chunk 9'}, 3: {'id': 3, 'title': 'Let GPT Be a Math Tutor: Teaching Math Word Problem Solvers with Customized Exercise Generation.', 'content': '# Limitations\\nDespite the great performance achieved by the student model with the incorporation of our proposed technique, certain limitations remain. Firstly, our approach necessitates meticulous prompt design to generate exercises, which inevitably entails human intervention. This aspect could introduce potential bias or variability and may not scale efficiently.  \\n\\nSecondly, we have not explicitly addressed the quality and correctness of the generated problems. Our current filtering process only eliminates problems with incorrect formatting. 
Thus, there exists a significant opportunity for enhancing the effectiveness of our approach by incorporating mechanisms for evaluating and ensuring the quality and correctness of the generated exercises.  \\n\\nLastly, the current framework relies on a source problem for exercise generation. Future research could explore the feasibility of generating exercises without direct reference problems, potentially utilizing only abstract knowledge components or keywords. Such an exploration could lead to better  \\n\\nflexibility and robustness in the generation process.   \\nReferences   \\nGhodai Abdelrahman, Qing Wang, and Bernardo Nunes. 2023. Knowledge tracing: A survey. ACM Computing Surveys , 55(11):1–37.   \\nTom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. NeurIPS , 33:1877–1901.   \\nWenhu Chen, Xueguang Ma, Xinyi Wang, and William W Cohen. 2022. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. arXiv preprint arXiv:2211.12588 .  \\nAakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 .  \\nAlbert T Corbett and John R Anderson. 1994. Knowledge tracing: Modeling the acquisition of procedural knowledge. User modeling and user-adapted interaction , 4:253–278.   \\nJacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In NAACL , pages 4171–4186.   \\nShizhe Diao, Pengcheng Wang, Yong Lin, and Tong Zhang. 2023. Active prompting with chain-of-thought for large language models. arXiv preprint arXiv:2302.12246 .  \\nCharles R Fletcher. 1985. 
Understanding and solving arithmetic word problems: A computer simulation. Behavior Research Methods, Instruments, & Computers , 17(5):565–571.   \\nPeggy Grant and Dale Basye. 2014. Personalized learning: A guide for engaging students with technology . International Society for Technology in Education.   \\nJohn Hattie and Helen Timperley. 2007. The power of feedback. Review of educational research , 77(1):81– 112.   \\nMary Hegarty, Richard E Mayer, and Christopher A Monk. 1995. Comprehension of arithmetic word problems: A comparison of successful and unsuccessful problem solvers. Journal of educational psychology , 87(1):18.   \\nNamgyu Ho, Laura Schmid, and Se-Young Yun. 2022. Large language models are reasoning teachers. arXiv preprint arXiv:2212.10071 .  \\nSepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural computation ,9(8):1735–1780.   \\nMohammad Javad Hosseini, Hannaneh Hajishirzi, Oren Etzioni, and Nate Kushman. 2014. Learning to solve arithmetic word problems with verb categorization. In EMNLP , pages 523–533.   \\nDanqing Huang, Shuming Shi, Chin-Yew Lin, and Jian Yin. 2017. Learning fine-grained expressions to solve math word problems. In EMNLP , pages 805– 814.   \\nShifeng Huang, Jiawei Wang, Jiao Xu, Da Cao, and Ming Yang. 2021. Recall and learn: A memoryaugmented solver for math word problems. In Findings of EMNLP , pages 786–796.   \\nZhanming Jie, Jierui Li, and Wei Lu. 2022. Learning to reason deductively: Math word problem solving as complex relation extraction. In ACL , pages 5944– 5955.   \\nDiederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.  \\nKenneth R Koedinger and Mitchell J Nathan. 2004. The real story behind story problems: Effects of representations on quantitative reasoning. The journal of the learning sciences , 13(2):129–164.   \\nTakeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. 
Large language models are zero-shot reasoners. arXiv preprint arXiv:2205.11916 .  \\nRik Koncel-Kedziorski, Hannaneh Hajishirzi, Ashish Sabharwal, Oren Etzioni, and Siena Dumas Ang. 2015. Parsing algebraic word problems into equations. Transactions of the Association for Computational Linguistics , 3:585–597.   \\nRik Koncel-Kedziorski, Subhro Roy, Aida Amini, Nate Kushman, and Hannaneh Hajishirzi. 2016. Mawps: A math word problem repository. In NAACL , pages 1152–1157.   \\nNate Kushman, Yoav Artzi, Luke Zettlemoyer, and Regina Barzilay. 2014. Learning to automatically solve algebra word problems. In ACL , pages 271– 281.   \\nYihuai Lan, Lei Wang, Qiyuan Zhang, Yunshi Lan, Bing Tian Dai, Yan Wang, Dongxiang Zhang, and Ee-Peng Lim. 2022. Mwptoolkit: An open-source framework for deep learning-based math word problem solvers. In AAAI , volume 36, pages 13188– 13190.   \\nJierui Li, Lei Wang, Jipeng Zhang, Yan Wang, Bing Tian Dai, and Dongxiang Zhang. 2019. Modeling intra-relation in math word problems with different functional multi-head attentions. In ACL , pages 6162–6167.   \\nShiyang Li, Jianshu Chen, Yelong Shen, Zhiyu Chen, Xinlu Zhang, Zekun Li, Hong Wang, Jing Qian, Baolin Peng, Yi Mao, et al. 2022. Explanations from large language models make small reasoners better. arXiv preprint arXiv:2210.06726 .  \\nZhongli Li, Wenxuan Zhang, Chao Yan, Qingyu Zhou, Chao Li, Hongzhi Liu, and Yunbo Cao. 2021. Seeking patterns, not just memorizing procedures: Contrastive learning for solving math word problems. arXiv preprint arXiv:2110.08464 .  \\nZhenwen Liang, Jipeng Zhang, Lei Wang, Wei Qin, Yunshi Lan, Jie Shao, and Xiangliang Zhang. 2022a. Mwp-bert: Numeracy-augmented pre-training for math word problem solving. In NAACL , pages 997– 1009.   \\nZhenwen Liang, Jipeng Zhang, and Xiangliang Zhang. 2022b. Analogical math word problems solving with enhanced problem-solution association. EMNLP .  \\nZhenwen Liang and Xiangliang Zhang. 2021. 
Solving math word problems with teacher supervision. In IJCAI , pages 3522–3528.   \\nAlisa Liu, Swabha Swayamdipta, Noah A Smith, and Yejin Choi. 2022. Wanli: Worker and ai collaboration for natural language inference dataset creation. Findings of EMNLP .  \\nQianying Liu, Wenyu Guan, Sujian Li, Fei Cheng, Daisuke Kawahara, and Sadao Kurohashi. 2020. Reverse operation based data augmentation for solving math word problems. arXiv preprint arXiv:2010.01556 .  \\nQianying Liu, Wenyv Guan, Sujian Li, and Daisuke Kawahara. 2019a. Tree-structured decoding for solving math word problems. In EMNLP , pages 2370–2379.   \\nYinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019b. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 .  \\nIlya Loshchilov and Frank Hutter. 2018. Decoupled weight decay regularization. In ICLR .  \\nPan Lu, Liang Qiu, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, Tanmay Rajpurohit, Peter Clark, and Ashwin Kalyan. 2022. Dynamic prompt learning via policy gradient for semistructured mathematical reasoning. arXiv preprint arXiv:2209.14610 .  \\nLucie Charlotte Magister, Jonathan Mallinson, Jakub Adamek, Eric Malmi, and Aliaksei Severyn. 2022. Teaching small language models to reason. arXiv preprint arXiv:2212.08410 .  \\nShen-Yun Miao, Chao-Chun Liang, and Keh-Yih Su. 2020. A diverse corpus for evaluating and developing english math word problem solvers. In ACL ,pages 975–984.   \\nArindam Mitra and Chitta Baral. 2016. Learning to use formulas to solve simple arithmetic problems. In ACL , pages 2144–2153.   \\nAnirban Mukherjee and Utpal Garain. 2008. A review of methods for automatic understanding of natural language mathematical problems. Artificial Intelligence Review , 29:93–122.   \\nArkil Patel, Satwik Bhattamishra, and Navin Goyal. 2021. Are nlp models really able to solve simple math word problems? 
In NAACL , pages 2080–2094.   \\nSubhro Roy and Dan Roth. 2015. Solving general arithmetic word problems. In EMNLP , pages 1743– 1752.   \\nSubhro Roy, Tim Vieira, and Dan Roth. 2015. Reasoning about quantities in natural language. TACL ,3:1–13.   \\nZhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Synthetic prompting: Generating chain-of-thought demonstrations for large language models. arXiv preprint arXiv:2302.00618 .  \\nJianhao Shen, Yichun Yin, Lin Li, Lifeng Shang, Xin Jiang, Ming Zhang, and Qun Liu. 2021. Generate & rank: A multi-task framework for math word problems. In Findings of EMNLP , pages 2269–2279.   \\nFreda Shi, Mirac Suzgun, Markus Freitag, Xuezhi Wang, Suraj Srivats, Soroush Vosoughi, Hyung Won Chung, Yi Tay, Sebastian Ruder, Denny Zhou, et al. 2022. Language models are multilingual chain-of-thought reasoners. arXiv preprint arXiv:2210.03057 .  \\nShuming Shi, Yuehui Wang, Chin-Yew Lin, Xiaojiang Liu, and Yong Rui. 2015. Automatically solving number word problems by semantic parsing and reasoning. In EMNLP , pages 1132–1142.   \\nKumar Shridhar, Jakub Macina, Mennatallah ElAssady, Tanmay Sinha, Manu Kapur, and Mrinmaya Sachan. 2022a. Automatic generation of socratic subquestions for teaching math word problems. In EMNLP .  \\nKumar Shridhar, Alessandro Stolfo, and Mrinmaya Sachan. 2022b. Distilling multi-step reasoning capabilities of large language models into smaller models via semantic decompositions. arXiv preprint arXiv:2212.00193 .  \\nBen Wang and Aran Komatsuzaki. 2021. Gptj-6b: A 6 billion parameter autoregressive language model .https://github.com/kingoflolz/ mesh-transformer-jax .  \\nLei Wang, Yan Wang, Deng Cai, Dongxiang Zhang, and Xiaojiang Liu. 2018. Translating a math word problem to a expression tree. In EMNLP , pages 1064–1069.   \\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, and Denny Zhou. 2022a. Self-consistency improves chain of thought reasoning in language models. 
arXiv preprint arXiv:2203.11171 .  \\nYan Wang, Xiaojiang Liu, and Shuming Shi. 2017. Deep neural solver for math word problems. In EMNLP , pages 845–854.   \\nYufei Wang, Can Xu, Qingfeng Sun, Huang Hu, Chongyang Tao, Xiubo Geng, and Daxin Jiang. 2022b. Promda: Prompt-based data augmentation for low-resource nlu tasks. In ACL , pages 4242– 4255.   \\nJason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. 2022a. Emergent abilities of large language models. Transactions on Machine Learning Research .  \\nJason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022b. Chain of thought prompting elicits reasoning in large language models. arXiv preprint arXiv:2201.11903 .  \\nPeter West, Chandra Bhagavatula, Jack Hessel, Jena Hwang, Liwei Jiang, Ronan Le Bras, Ximing Lu, Sean Welleck, and Yejin Choi. 2022. Symbolic knowledge distillation: from general language models to commonsense models. In NAACL , pages 4602–4625.   \\nZhipeng Xie and Shichao Sun. 2019. A goal-driven tree-structured neural model for math word problems. In IJCAI , pages 5299–5305.   \\nZhicheng Yang, Jinghui Qin, Jiaqi Chen, and Xiaodan Liang. 2022. Unbiased math word problems benchmark for mitigating solving bias. arXiv preprint arXiv:2205.08108 .  \\nJiacheng Ye, Jiahui Gao, Jiangtao Feng, Zhiyong Wu, Tao Yu, and Lingpeng Kong. 2022a. Progen: Progressive zero-shot dataset generation via in-context feedback. Findings of EMNLP .  \\nJiacheng Ye, Jiahui Gao, Qintong Li, Hang Xu, Jiangtao Feng, Zhiyong Wu, Tao Yu, and Lingpeng Kong. 2022b. Zerogen: Efficient zero-shot learning via dataset generation.EMNLP.  \\nJipeng Zhang, Lei Wang, Roy Ka-Wei Lee, Yi Bin, Yan Wang, Jie Shao, and Ee-Peng Lim. 2020. Graph-totree learning for solving math word problems. In ACL , pages 3928–3937.  \\n\\nChujie Zheng, Sahand Sabour, Jiaxin Wen, and Minlie Huang. 2022. 
Augesc: Large-scale data augmentation for emotional support conversation with pre-trained language models. arXiv preprint arXiv:2202.13047 .  \\n\\nDenny Zhou, Nathanael Schärli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Olivier Bousquet, Quoc Le, and Ed Chi. 2022. Least-to-most prompting enables complex reasoning in large language models. arXiv preprint arXiv:2205.10625 .', 'reference': '[3] Let GPT Be a Math Tutor: Teaching Math Word Problem Solvers with Customized Exercise Generation., EMNLP, 2023, chunk 7'}, 4: {'id': 4, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  
\\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. 
Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. 
,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[4] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 5: {'id': 5, 'title': 'Generated Knowledge Prompting for Commonsense Reasoning', 'content': '# A Appendix\\n\\n# A.1 Comparison with Prior Methods\\nTable 6 summarizes the comparison between our generated knowledge prompting method and prior methods that add generated text to an inference model for commonsense reasoning tasks. Our method is unique because it uses few-shot demonstrations to prompt for knowledge generation, and can apply to finetuned inference models without joint finetuning with knowledge.\\n\\n# A.2 Prompts for Knowledge Generation\\nTable 7 through 10 shows the full prompts for knowledge generation that we use for each evaluated task: NumerSense, CSQA, CSQA2, and QASC.\\n\\n# A.3 Human Evaluation Guidelines\\nTable 11 and 12 shows the detailed guidelines we use for human evaluation of generated knowledge.\\n\\n# BChecklist\\n\\n# B.1 Limitations and Risks\\nLimitations. Our method is tested on a representative selection of commonsense reasoning tasks and datasets. Applying this method to other tasks may require people with moderate expertise to craft a task-specific prompt to feed into the method.  \\n\\nRisks. It is possible that our proposed method may lower the performance of commonsense reasoning systems, if not implemented properly or using badly-designed prompts. 
Such risk can be mitigated by following the prompt design guidelines in this paper (§ 2.1 ).', 'reference': '[5] Generated Knowledge Prompting for Commonsense Reasoning, ACL, 2022, chunk 5'}, 6: {'id': 6, 'title': 'ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness', 'content': '# Acknowledgements\\nWe thank the reviewers and the area chairs for their helpful comments. We also thank Peter Hase, Prateek Yadav, and Shiyue Zhang for their feedback. This work was supported by NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, and a Google Ph.D. Fellowship. The views contained in this article are those of the authors and not of the funding agency.\\n\\n# Limitations\\nAn interesting assumption for future work to address is that all knowledge typically needed to evaluate the correctness of a reasoning step is explicitly present as part of the input or the intermediate reasoning steps. In scenarios where correctness depends on implicit knowledge, we rely on the choice of underlying models (described in Appendix A )which are built on top of pre-trained LMs and are known to capture a lot of background knowledge ( Petroni et al. ,2019 ;Roberts et al. ,2020 ). However, inferences that rely on substantial implicit knowledge may not be best evaluated through current metrics. While current evaluation frameworks focus on evaluating the quality of modelgenerated reasoning chains, Wei et al. (2022 ) note that the chain itself may not faithfully reflect the internal reasoning process of the model. This remains an open question for future work to address.', 'reference': '[6] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. He gets two more each from his mom and dad. 
How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  \\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  \\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. 
,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  \\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). 
Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. 
How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[1] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 2: {'id': 2, 'title': 'Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution.', 'content': '# 2 RELATED WORK\\nPrompting an LLM in the right way is essential to its downstream performance ( Moradi & Samwald ,2021 ;Madaan & Yazdanbakhsh ,2022 ;Zhou et al. ,2023 ). Indeed, even the order in which prompts are presented can heavily influence LLM performance ( Lu et al. ,2022 ). A number of recent works have focused on devising better prompt strategies, or even automating such prompt engineering.  \\n\\nPrompting : Chain-of-Thought Prompting (CoT, Wei et al. ,2022 ) is a popular prompt strategy which provides intermediate reasoning steps as few-shot prompts to an LLM, thereby significantly improving its arithmetic, commonsense, and symbolic reasoning abilities. Notably, the gains of CoT are more pronounced for stronger LLMs. This is intriguing, as it points to the possibility of increasingly capable (and potentially open-ended) self-improving mechanisms on top of adept LLMs—a hypothesis that Promptbreeder directly builds upon. Instead of few-shot CoT prompting, Kojima et al. (2022 ) demonstrate that LLMs can also be prompted zero-shot (e.g. \"Let’s think step by step\" ) to produce their own chains of thoughts (Zero-shot CoT) that improve reasoning abilities. Self-Consistency (CoT-SC, Wang et al. ,2022 ) extends CoT by sampling a diverse set of workings out and selecting the most consistent answer. 
Tree of Thoughts (ToT, Yao et al. ,2023 ) generalizes CoT to multiple workings out that can be expanded or backtracked from. Graph of Thoughts (GoT, Besta et al. ,2023 ) is a further generalization to arbitrary graph structures. Plan-and-Solve Prompting (PS, Wang et al. ,2023b ) encourages an LLM to first devise a plan to solve a problem before attempting to solve it. Similarly, Least-to-Most Prompting ( Zhou et al. ,2022 ) encourages an LLM to decompose a problem into subparts, and then to solve each part individually before synthesizing an answer. Self-Refine ( Madaan et al. ,2023 ) prompts an LLM to generate a response, to provide feedback on the response, and to finally refine the solution.  \\n\\nIn contrast to gradient-free approaches above, Soft Prompting approaches (e.g., Liu et al. ,2021 ;Qin & Eisner ,2021 ;Lester et al. ,2021 ) directly fine-tune continuous prompt representations. Huang et al. (2022 ) use CoT and CoT-SC on an unlabelled dataset of questions, and subsequently finetune an LLM based on generated solutions. Similarly, Zelikman et al. (2022 ) uses CoT to generate rationales and fine-tunes the LLM based on those examples and rationales that yielded the correct answer. However, as argued by Zhou et al. (2023 ), any approach that updates all or a portion of LLM parameters will not scale as models get bigger and, moreover, will not work with the increasing number of LLMs hidden behind an API.  \\n\\nAll of the prompt engineering approaches above are domain agnostic but hand designed. Central to our work is the hypothesis that we could do better by employing an automated self-improvement process that can adapt prompts to a domain at hand. Auto-CoT ( Zhang et al. ,2023b ) and AutomaticCoT ( Shum et al. ,2023 ) automatically find reasoning chains for Few-Shot CoT. Automatic Prompt Engineer (APE, Zhou et al. ,2023 ) uses one generator-prompt to generate prompt candidates, and another mutation-prompt to mutate them. 
In contrast to APE, our work performs compositional task-specific initialization of mutation-prompts, subsequent online mutation of mutation-prompts, uses special mutation operators that take into account the whole population and elite history, and uses diversity-maintenance methods—all of which help avoid the problem of diminishing returns and diversity loss suffered by APE.  \\n\\nConcurrently to our work, Yang et al. (2023a ) developed Optimization by PROmpting (OPRO), a prompt optimization method that varies prompts using a single complex mutation prompt, and evaluates newly generated prompts on a small fixed training set of problems. In contrast, Promptbreeder autonomously evolves multiple LLM generated mutation-prompts as well as task-prompts, and evaluates fitness on random subsets from the whole training set during evolution. At the time of its release, OPRO achieved a score of $80.2\\\\%$ via the optimized zero-shot prompt \"Take a deep breath and work on this problem step-by-step\" on GSM8K. Promptbreeder surpasses this with $83.9\\\\%$ in the zero-shot setting with the unintuitively simple prompt \"SOLUTION\"\" —further evidence for the sensitivity of LLMs to prompts and the importance on finding effective prompts automatically. Also concurrently to our work, Guo et al. (2023 ) developed EvoPrompt, which uses a fixed mutation (and crossover) prompt, as well as a prompt that asks for a mutant of the difference between two parent prompts, to produce offspring prompts. EvoPrompt is initialized with a whole population of initial hand-designed task tailored prompts rather than a single problem description as we do. In contrast to the two approaches above, Promptbreeder uses LLMs to self-referentially improve mutation-prompts, and it is able to evolve contexts as well.  
\\n\\nSelf-Referential Self-Improvement : Developing an open-ended system that can improve itself as well as improving the way it is improving itself ( Schmidhuber ,1993 ;2003 ) is a long-standing open problem in AI research. Schmidhuber (1993 ) introduced an “introspective” neural network with a self-referential weight matrix that can modify its own weights and, thus, also modify those weights that are governing how its own weights are modified. Recently, Irie et al. (2022 ) proposed a more scalable self-referential weight matrix taking inspiration from fast weight programmers ( Schmidhuber ,1992 ). Kirsch & Schmidhuber (2022 ) propose a self-referential meta-learning approach, combining self-referential weight matrices with ideas from G¨odel Machines ( Schmidhuber ,2003 ), i.e., to allocate more computational resources to better performing solutions. However, since these approaches directly modify parameters of a model, it is unclear how to scale them to the increasing number of parameters in modern LLMs. In contrast, for Promptbreeder the substrate of selfreferential self-improvement is natural language, avoiding costly parameter updates altogether.  \\n\\nOpen-Endedness and LLMs : Promptbreeder makes use of the observation by Lehman et al. (2022 ), Meyerson et al. (2023 ) and Chen et al. (2023 ) that LLMs are effective at generating mutations from examples. In addition, LLMs encode human notions of interestingness and can be used to automatically quantify novelty ( Zhang et al. ,2023a ). Promptbreeder is related to Picbreeder ( Secretan et al. ,2008 ), an open-ended human-in-the-loop system that evolves increasingly interesting images. While Picbreeder explores the space of images, Promptbreeder explores the space of prompts and does so without humans in the loop. As Promptbreeder is proposing mutated prompts to itself, it is an example of a system transitioning from “learning from data” to “learning what data to learn from” ( Jiang et al. 
,2022 ).', 'reference': '[2] Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution., ICML, 2024, chunk 2'}, 3: {'id': 3, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. 
Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  
\\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). 
We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. (2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 4: {'id': 4, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. 
(2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. 
While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. (2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. 
EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. (2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. 
This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. (2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[4] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 5: {'id': 5, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. 
To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. 
In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). 
Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[5] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 6: {'id': 6, 'title': 'Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models', 'content': '# Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\\nBilgehan Sel 1 , Ahmad Al-Tawaha 1 , Vanshaj Khattar 1 , Lu Wang 2 , Ruoxi Jia 1 and Ming Jin 1 1 Virginia Tech 2 Microsoft\\n\\n# Abstract\\nCurrent literature, aiming to surpass the “Chain-of-Thought” approach, often resorts to an external modus operandi involving halting, modifying, and then resuming the generation process to boost Large Language Models’ (LLMs) reasoning capacities. This mode escalates the number of query requests, leading to increased costs, memory, and computational overheads. Addressing this, we propose the Algorithm of Thoughts —a novel strategy that propels LLMs through algorithmic reasoning pathways, pioneering a new mode of in-context learning. By employing algorithmic examples, we exploit the innate recurrence dynamics of LLMs, expanding their idea exploration with merely one or a few queries. Our technique outperforms earlier single-query methods and stands on par with a recent multi-query strategy that employs an extensive tree search algorithm. Intriguingly, our results suggest that instructing an LLM using an algorithm can lead to performance surpassing that of the algorithm itself, hinting at LLM’s inherent ability to weave its intuition into optimized searches. 
We probe into the underpinnings of our method’s efficacy and its nuances in application.', 'reference': '[6] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'}, 7: {'id': 7, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. 
(2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. 
Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. 
We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[7] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 8: {'id': 8, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# COMPLEXITY -B ASED PROMPTING FOR MULTI -STEP REASONING\\nYao $\\\\mathbf{F}\\\\mathbf{u}^{\\\\star}$ $\\\\mathbf{\\\\ddot{\\\\rho}}\\\\mathbf{Hao}\\\\ \\\\mathbf{Peng}^{\\\\pmb{\\\\alpha}}$ , Ashish Sabharwal ♣, Peter Clark ♣, Tushar Khot ♣♠University of Edinburgh ♣Allen Institute for AI yao.fu $@$ ed.ac.uk, haop $@$ allenai.org, ashishs $@$ allenai.org, peterc $@$ allenai.org, tushark $@$ allenai.org\\n\\n# A BSTRACT\\nWe study the task of prompting large-scale language models to perform multistep reasoning. Existing work shows that when prompted with a chain of thoughts (CoT), sequences of short sentences describing intermediate reasoning steps towards a final answer, large language models can generate new reasoning chains and predict answers for new inputs. A central question is which reasoning examples make the most effective prompts. In this work, we propose complexitybased prompting, a simple and effective example selection scheme for multi-step reasoning. We show that prompts with higher reasoning complexity , i.e., chains with more reasoning steps, achieve substantially better performance on multistep reasoning tasks over strong baselines. 
We further extend our complexitybased criteria from prompting (selecting inputs) to decoding (selecting outputs), where we sample multiple reasoning chains from the model, then choose the majority of generated answers from complex reasoning chains (over simple chains). When used to prompt GPT-3 and Codex, our approach substantially improves multi-step reasoning accuracy and achieves new state-of-the-art (SOTA) performance on three math benchmarks (GSM8K, MultiArith, and MathQA) and two BigBenchHard tasks (Date Understanding and Penguins), with an average $+5.3$ and up to $+18$ accuracy improvements. Compared with existing example selection schemes like manual tuning or retrieval-based selection, selection based on reasoning complexity is intuitive, easy to implement, and annotation-efficient. Further results demonstrate the robustness of performance gains from complex prompts under format perturbation and distribution shift.', 'reference': '[8] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 0'}, 9: {'id': 9, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). 
However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  
\\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. (2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. 
,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. For each compared method, we consider both iterative and non-iterative settings. Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[9] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Guiding Large Language Models Via Directional Stimulus Prompting.', 'content': \"# 3.3 Chain-of-Thought reasoning\\nWhile current methods primarily use general task-specific prompts, LLMs show sensitivity to them. Studies [ 69 ,26 ,79 ] demonstrate that LLMs can vary in performance based on the prompt used. As a result, much of the previous work has centered on manually [ 56 ] or automatically [ 61 ,79 ]crafting better prompts. However, these efforts mainly focus on task-specific prompts, which may not be optimal for every instance of a task. In our experiment, we employ our approach to generate instance-specific trigger prompts to elicit Chain-of-Thought (CoT) reasoning. Specifically, we train a policy model ( t5-base ) to generate instance-specific CoT trigger prompts, such as “ Let’s think step by step ”, to optimally prompt varying samples.  \\n\\nDataset and evaluation We adopted the experimental setup from previous work [ 26 ,79 ], where we tested zero-shot CoT reasoning abilities of InstructGPT ( text-davinci-002 ) with different trigger prompts. There are 600 examples in the MultiArith dataset [ 57 ], which we divided into 300/50/250 for training/validation/test set. As for the AQuA dataset [ 35 ], we use the standard test set with 254 samples, 300 samples from the standard training set for our training, and 100 samples for the standard validation set for our validation. We report the reasoning accuracy.  \\n\\nTable 2: Zero-shot chain of thoughts performance of InstructGPT ( text-davinci-002 ) with different prompts. ${}^{*}\\\\mathrm{Our}$ approach trains a policy model to generate instance-specific prompt triggers, which are compared to the task-specific prompts in [26, 79].   
\\n\\n\\n<html><body><table><tr><td>No.</td><td>Category</td><td>Chain-of-Thought Trigger Prompt</td><td>MultiArith</td><td>AQuA</td></tr><tr><td>1</td><td>Human-Designed</td><td>Let's thinkstepby step.</td><td>79.6</td><td>31.9</td></tr><tr><td>2</td><td></td><td>Weshouldthinkaboutthisstepbystep.</td><td>81.2</td><td>28.7</td></tr><tr><td>3</td><td></td><td>First,</td><td>78.0</td><td>38.2</td></tr><tr><td>4</td><td></td><td>Beforewediveintotheanswer,</td><td>54.8</td><td>27.2</td></tr><tr><td>5</td><td></td><td>Prooffollowedbytheanswer</td><td>58.4</td><td>37.8</td></tr><tr><td>6</td><td></td><td>Let'sthinkstepbystepinarealisticway.</td><td>59.6</td><td>33.9</td></tr><tr><td>7</td><td></td><td>Let's thinkstepby step usingcommon sense and knowledge.</td><td>80.0</td><td>34.3</td></tr><tr><td>8</td><td></td><td>Let'sthinklikeadetectivestepbystep.</td><td>73.6</td><td>24.0</td></tr><tr><td>9</td><td></td><td>Let'sthinkaboutthislogically.</td><td>75.2</td><td>34.7</td></tr><tr><td>10</td><td></td><td>Let'sthink stepby step.First,</td><td>78.8</td><td>32.3</td></tr><tr><td>11</td><td></td><td>Let'sthink</td><td>56.8</td><td>38.2</td></tr><tr><td>12</td><td></td><td>Let'ssolvethisproblembysplittingit intosteps.</td><td>72.4</td><td>33.2</td></tr><tr><td>13</td><td></td><td>Theansweris aftertheproof.</td><td>42.8</td><td>34.3</td></tr><tr><td>14</td><td></td><td>Let'sberealisticandthinkstepbystep.</td><td>69.6</td><td>29.9</td></tr><tr><td>15</td><td>APE [79]</td><td>Let's work this out in a stepby stepway to be surewehavetherightanswer</td><td>81.6</td><td>34.3</td></tr><tr><td>16</td><td>DSP w/ SFT</td><td>(*Generated instance-specific prompt)</td><td>75.2</td><td>35.8</td></tr><tr><td>17</td><td>DSPw/SFT+RL</td><td>(*Generated instance-specific prompt)</td><td>84.0</td><td>38.6</td></tr></table></body></html>  \\n\\nSupervised fine-tuning details For supervised fine-tuning (SFT), we first run inference on the training set with the 14 human-crafted prompts tested in [ 
26 ], respectively. We then selected those prompt and query pairs which resulted in a correct CoT reasoning outcome to form the training set for SFT. These query-prompt pairs were used to train a t5-base policy model for 2 epochs, with the model input being the query instance and the target output a trigger prompt.  \\n\\nRL training details After SFT, the prompts generated by the policy model were used to trigger InstructGPT for zero-shot CoT prompting. Reasoning accuracy was utilized as the reward for reinforcement learning (RL). A reward of 1 was assigned for correct reasoning results and 0 otherwise. We conducted 20 training iterations (106k episodes), with 5 epochs per batch, a batch size of 8, and a learning rate of 2e-6. The parameters for $\\\\mathrm{KL}_{\\\\mathrm{target}}$ and $\\\\beta_{0}$ were set to 0.5 and 0.001, respectively.  \\n\\nResults We compare the performance of using our generated instance-specific prompts with using the 14 human-crafted prompts which we used as the pseudo-stimulus to constitute the training set for SFT and also the prompt automatically discovered by the APE approach [ 79 ]. Note that all these 15 prompts are general task-specific and are used for the whole test set while ours are instance-specific. The performance comparison is shown in the Table 8. As can be seen, InstructGPT’s performance varies significantly when using different task-specific prompts. Compared to the 14 task-specific human-designed prompts, DSP enhances the performance with instance-specific prompts. It also outperforms the prompt discovered by the APE approach. Solely relying on supervised fine-tuning of the policy model with the dataset comprising the 14 human-designed prompts doesn’t lead to its peak performance. 
After fine-tuning with RL, the policy model is encouraged to explore better instance-specific trigger prompts, further improving performance.\\n\\n# 4 Related work\\nBlack-box large language models Recent years have witnessed the emergence of LLMs such as GPT-3 [ 6 ], Codex [ 9 ], InstructGPT, ChatGPT [ 46 ], PaLM [ 10 ], and LaMDA [ 66 ], which show significant promise in the field of NLP. These LLMs typically have a large number of parameters and require vast amounts of training data. Due to their scaling, these models have exhibited many emergent abilities, such as in-context learning, few-shot prompting, chain-of-thought prompting, and instruction following [ 6 ,46 ,69 ]. However, most LLMs are not open-sourced and can only be accessed via black-box APIs, through which the users send prompt queries and receive responses.  \\n\\nWhile there exist open-source LLMs such as OPT-175B [ 73 ] and Bloom [ 58 ], their local execution and fine-tuning require significant computational resources that may be infeasible for most researchers and users. However, despite their considerable performance on various tasks, LLMs often fall short of generating outputs that fully align with desired outputs on specific downstream tasks and use cases [ 16 ,42 ,18 ]. Our approach seeks to address this limitation by introducing directional stimulus generated by a small tunable LM into the prompt to provide more fine-grained guidance and control over black-box LLMs.  \\n\\nPrompt optimization and engineering Efficiently optimizing pre-trained LMs on downstream tasks by finding optimal prompts has been a focus of prior research. One approach involves tuning soft prompts, which are continuous embedding vectors that can be optimized using gradient descent methods [ 32 ,30 ,67 ,2 ,64 ]. However, the requirements of gradients and the challenge of passing gradients and continuous prompts through black-box APIs, making them less practical for the blackbox LLMs. 
Researchers have also tried to seek optimal prompts by designing task-specific natural language instructions and selecting proper training samples as in-context demonstrations in the prompt. These methods include manual engineering [ 50 ,6 ,56 ], editing [ 61 ,76 ], reinforcement learning [ 13 ,39 ], and automatic generation [ 79 ]. Despite these efforts, such prompts are not always effective at steering LLMs to generate desired outputs, especially for fine-grained instance-specific behaviors that are difficult to describe using task-specific instructions and demonstration examples. To address this limitation, our approach is able to provide more fine-grained instance-specific guidance generated by a small tunable policy model optimized with supervised fine-tuning and/or reinforcement learning.  \\n\\nControllable text generation The control of language models (LMs) has been extensively studied. Early approaches fine-tuned LMs on datasets containing desired attributes [ 17 ]. [ 24 ] proposed class-conditioned LMs, generating text with predefined control codes. However, direct LM training is costly. To address this, PPLM [ 12 ] trains an attribute model and passes gradients to control generation. GeDi [ 27 ] and DExperts [ 36 ] use class-conditional distributions as generative discriminators to guide generation, reducing computation complexity. These methods require either additional LM training or internal gradients and logistics, making them not applicable to black-box LLMs. Our approach proposes a solution to control black-box LLMs by inserting directional stimulus into the input query prompt and optimizing based on the return output.  \\n\\nReinforcement learning for NLP Reinforcement learning has been successfully applied to various NLP tasks, such as syntactic parsing [ 44 ,29 ], machine translation [ 71 ,28 ], summarization [ 48 ,62 ], conversational systems [ 31 ], etc. 
Language models define probability distributions over tokens in their vocabulary, and the text generation problem can be naturally formulated as selecting an action in an RL setting. Therefore, there have been extensive research efforts on optimizing LMs with RL, usually by aligning them with human preferences [ 80 ,70 ,40 ,62 ]. For example, the LLM InstructGPT [ 46 ] is optimized with RL to better follow users’ instructions and intent. In contrast with these works that directly update the LLMs to align with human preferences, our work optimizes a small policy model that generates text (stimulus) to guide LLMs to generate more human-preferred output instead of directly optimizing the LLMs, bypassing the inefficient LLM’s optimization.\", 'reference': '[0] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6'}, 1: {'id': 1, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  
\\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. 
Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. 
,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 2: {'id': 2, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[2] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 3: {'id': 3, 'title': 'Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling', 'content': '# 5 Related Work\\nIn-Context Learning is an emergent ability of LLMs as they scale up in model sizes and training data, where an LLMs can learn to perform a task from a few examples in the context (which is also referred to as few-shot prompting) [Brown et al., 2020]. It has been shown to achieve promising few-shot and even zero-shot performance on various natural language processing [Brown et al., 2020, Schick and Schütze, 2020, Perez et al., 2021] and program synthesis [Austin et al., 2021] tasks.  \\n\\nReasoning via Chain-of-Thought Prompting Chain-of-Thought (CoT) prompting is a technique that enables LLMs to perform complex reasoning tasks by prompting them with a few examples with step-by-step solutions [Wei et al., 2022, Suzgun et al., 2022]. CoT prompting has been shown to improve performance on various reasoning tasks, such as arithmetic reasoning [Wei et al., 2022, Zhou et al., 2022], symbolic reasoning [Wei et al., 2022, Zhou et al., 2022], multi-hop question answering [Press et al., 2022, Arora et al., 2022], and natural language inference [Wang et al., 2022b]. However, designing effective CoT prompts requires human experts with an understanding of both the task and the prompting technique [Zamfirescu-Pereira et al., 2023], which limits the scalability and generalizability of CoT prompting.  \\n\\nSeveral works have attempted to automate the process of CoT prompt discovery. Zhang et al. [2022] proposed to use LLMs to generate CoT solutions for diverse training questions in zero-shot and integrate the generated CoT solutions in the prompt for solving test questions. 
This method can be seen as a special version of our Reprompting algorithm with $M=0$ iterations, while our experiments demonstrate that Reprompting improves the CoT solutions on the training examples through iterations. Deng et al. [2022], Zhang et al. [2023] proposed to train an additional policy model to find the best prompt through reinforcement learning, but their approaches are limited to prompt optimization within a relatively small search space (i.e. it is restricted to the prompts that are either extremely short or within a small edit distance from an initial prompt). Zhou et al. [2023] proposed a method for automatically generating, scoring and selecting effective instruction messages $m$ for zero-shot chain-of-thought reasoning, which is orthogonal and can be potentially combined with our algorithm. Paranjape et al. [2023] introduced a framework that automatically retrieves demonstrations of related tasks from a task library and generates CoT solutions for the new task. However, this framework still requires collective human efforts to write demonstrations for a diverse set of tasks in the task library. In contrast, our Reprompting algorithm enables LLMs to solve complex reasoning tasks without any human guidance. Additionally, Yoran et al. [2023] proposed a multi-chain reasoning (MCR) method that prompts LLMs to combine pieces of information from multiple chains of thought to predict the final answer, which differs from our method in two ways: first, MCR combines multiple CoT solutions to the same question at test time, while Reprompting combines CoT solutions generated for different training questions before testing; second, MCR combines solutions only once, whereas Reprompting iteratively samples new solutions and recombines them. 
As a result, Reprompting generates effective CoT recipes from only a few training examples, resulting in improved test performance without slowing down test inference.\\n\\n# 6 Conclusion\\nWe introduce Reprompting , an automated prompt inference algorithm which, without human effort, discovers effective chain-of-thought (CoT) prompts for each task given a few question-answer pairs. On five Big-Bench Hard (BBH) tasks, prompts discovered with Reprompting consistently outperform zero-shot, few-shot, and human-written CoT prompts. Furthermore, performance of a weak LLM is significantly aided by generating the initial CoT solutions using a stronger LLM and then reprompting using the weak LLM to optimize the prompt for itself. Overall, Reprompting achieves up to $+17$ point improvements over the previous state-of-the-art on BBH tasks, which was based on human-written prompts. Our results suggest that LLM comparisons can be highly sensitive to CoT selection, further emphasizing the need for automatic prompt discovery and optimization using algorithms such as Reprompting .\\n\\n\\n\\n# A Compute and Resources\\nWe use the OpenAI APIs for all our experiments. The average cost of running Reprompting per task (using either the Gibbs sampling or the greedy version) is around $\\\\mathbb{S}80$ (in US dollars) for gpt-3.5-turbo and $\\\\mathbb{S}800$ for text-davinci-003 based on the standard pricing.\\n\\n# BAdditional Illustrations\\nOn sensitivity to initialization We have shown that Reprompting can be sensitive to initial zeroshot recipe generation. In each task we tested, armed with a suitable prompt InstructGPT could reach test set accuracy equalling or besting ChatGPT. However, such a prompt could not be discovered if the prompt recombination and evolution through Reprompting was started with initial prompts generated by InstructGPT itself. Fig. 
B.1 points to a likely explanation: ChatGPT can generate a wider range of useful recipes, and whether these initial recipes lead to the correct solution or not, InstructGPT can follow them and, through Reprompting , refine them and correct them. Thus, as we have shown in our experiments, with good initialization, LLMs that may appear inferior based on their zero-shot performance may end up performing just as well or better than LLMs whose zero-shot performance is more encouraging. It would be interesting to see if Reprompting can use other LLMs in initialization to perform even better, or if the humans can be put back into the loop to provide some initial recipes, or some generic instructions on how to generate them.  \\n\\nOn transferability of discovered recipes The fact that $L L M_{1}$ (ChatGPT) can point $L L M_{2}$ (InstructGPT) in the right direction(s) for prompt discovery does not mean that the discovered prompts, having been optimized for training performance on $L L M_{2}$ will perform well when used to prompt $L L M_{1}$ .In fact, Table 2 in the main text indicates that the discovered CoT recipes that work for one model may not necessarily work for other models. For example, in the case of Temporal Sequences , the test performance is achieved with a prompt trained with InstructGPT (after initialization with ChatGPT as $L L M_{1}$ ). But when using that prompt in testing with ChatGPT the performance is by $18\\\\%$ lower. Figure B.2) illustrates the solution strategy that emerged from training: The time intervals that need to be reasoned over are sorted, and among the sorted list, the missing interval was inserted as the possible interval when the person in question could have performed an activity. Then the answer is generated. InstructGPT follows this procedure with accuracy over $99\\\\%$ , but ChatGPT sometimes skips the generation of that crucial line (for this recipe) with the missing interval within the timeline.  
\\n\\n  \\nFigure B.2: An example on Temporal Sequences where ChatGPT underperforms InstructGPT using the same CoT prompt optimized for InstructGPT via Reprompting (using ChatGPT+InstructGPT). ChatGPT fails to correctly execute the recipe as it skips a key step (the blue underlined text from InstructGPT) to reach the final answer. (The illustration does not show the full prompt that precedes the puzzle $x$ for brevity; it consists of 5 training examples with worked-out solutions that all follow the same strategy of solving these types of problems.)  \\n\\nTherefore, among initial “ideas” from ChatGPT, some can be refined to work well for InstructGPT, and others can be refined to work well for ChatGPT itself, as the best performance of ChatGPT (using the CoT prompt optimized for itself) is only slightly lower than that of the ChatGPT+InstructGPT combination.  \\n\\nThese results suggest that fair comparison between different LLMs may be difficult, as one needs to optimize the CoT prompt for each model, and that optimization is typically non-trivial.  \\n\\nHow do the model-generated CoT recipes differ from human-written ones? In the main text, We evaluated the performance of the CoT recipes generated using Reprompting and contrasted it with human-written ones in Suzgun et al. [2022]. As illustrated by the example recipes in Figure B.3, the model-generated CoT recipes share some similarities to human-written ones on some tasks (such as Logical Deduction ), but differs on other tasks. For instance, on Object Counting , the CoT generated using Reprompting computes the total number of objects by incrementing the count one by one (e.g. adding 4 to the count 5 by “ [6 ,7 ,8 ,9 ]”), while in the human written recipe, it computes the addition through an arithmetic formula at the end. 
On the task of Penguins in a Table , the automatically generated CoT inserts a piece of code to solve the task, whereas the CoT crafted by humans employs natural language to reason over the table. The full prompts that yield the best performance on each task are provided in the supplementary material.  \\n\\n  \\nFigure B.3: Examples of the best-performing CoT recipes inferred via Reprompting on Logical Deduction (best score: 66.3), Geometric Shapes (best score: 73.2), Object Counting (best score: 99.6), Penguins in a Table (best score: 81.5), Temporal Sequences (best score: 99.2), and Causal Judgement (best score: 68.4).', 'reference': '[3] Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling, ICML, 2024, chunk 3'}, 4: {'id': 4, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. 
,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. 
We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. 
This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[4] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 5: {'id': 5, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# COMPLEXITY -B ASED PROMPTING FOR MULTI -STEP REASONING\\nYao $\\\\mathbf{F}\\\\mathbf{u}^{\\\\star}$ $\\\\mathbf{\\\\ddot{\\\\rho}}\\\\mathbf{Hao}\\\\ \\\\mathbf{Peng}^{\\\\pmb{\\\\alpha}}$ , Ashish Sabharwal ♣, Peter Clark ♣, Tushar Khot ♣♠University of Edinburgh ♣Allen Institute for AI yao.fu $@$ ed.ac.uk, haop $@$ allenai.org, ashishs $@$ allenai.org, peterc $@$ allenai.org, tushark $@$ allenai.org\\n\\n# A BSTRACT\\nWe study the task of prompting large-scale language models to perform multistep reasoning. 
Existing work shows that when prompted with a chain of thoughts (CoT), sequences of short sentences describing intermediate reasoning steps towards a final answer, large language models can generate new reasoning chains and predict answers for new inputs. A central question is which reasoning examples make the most effective prompts. In this work, we propose complexitybased prompting, a simple and effective example selection scheme for multi-step reasoning. We show that prompts with higher reasoning complexity , i.e., chains with more reasoning steps, achieve substantially better performance on multistep reasoning tasks over strong baselines. We further extend our complexitybased criteria from prompting (selecting inputs) to decoding (selecting outputs), where we sample multiple reasoning chains from the model, then choose the majority of generated answers from complex reasoning chains (over simple chains). When used to prompt GPT-3 and Codex, our approach substantially improves multi-step reasoning accuracy and achieves new state-of-the-art (SOTA) performance on three math benchmarks (GSM8K, MultiArith, and MathQA) and two BigBenchHard tasks (Date Understanding and Penguins), with an average $+5.3$ and up to $+18$ accuracy improvements. Compared with existing example selection schemes like manual tuning or retrieval-based selection, selection based on reasoning complexity is intuitive, easy to implement, and annotation-efficient. 
Further results demonstrate the robustness of performance gains from complex prompts under format perturbation and distribution shift.', 'reference': '[5] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 0'}, 6: {'id': 6, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. 
Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  
\\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). 
We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. (2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). 
We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[6] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 7: {'id': 7, 'title': 'Large Language Models Are Zero-Shot Reasoners', 'content': '# 2 Background\\nWe briefly review the two core preliminary concepts that form the basis of this work: the advent of large language models (LLMs) and prompting, and chain of thought (CoT) prompting for multi-step reasoning.  \\n\\nLarge language models and prompting A language model (LM), is a model that looks to estimate the probability distribution over text. Recently, scaling improvements through larger model sizes (from a few million [Merity et al., 2016] to hundreds of millions [Devlin et al., 2019] to hundreds of billions [Brown et al., 2020] parameters) and larger data (e.g. webtext corpora [Gao et al., 2020]) have enabled pre-trained large language models (LLMs) to be incredibly adept at many downstream NLP tasks. Besides the classic “pre-train and fine-tune” paradigm [Liu et al., 2021b], models scaled to $^{100\\\\mathrm{B}+}$ parameters exhibit properties conducive to few-shot learning [Brown et al., 2020], by way of in context learning, where one can use a text or template known as a prompt to strongly guide the generation to output answers for desired tasks, thus beginning an era of “pre-train and prompt” [Liu et al., 2021a]. In work, we call such prompts with explicit conditioning on few task examples as few-shot prompts, and other template-only prompts as zero-shot prompts.  \\n\\nChain of thought prompting Multi-step arithmetic and logical reasoning benchmarks have particularly challenged the scaling laws of large language models [Rae et al., 2021]. 
Chain of thought (CoT) prompting [Wei et al., 2022], an instance of few-shot prompting, proposed a simple solution by modifying the answers in few-shot examples to step-by-step answers, and achieved significant boosts in performance across these difficult benchmarks, especially when combined with very large language models like PaLM [Chowdhery et al., 2022]. The top row of Figure 1 shows standard few-shot prompting against (few-shot) CoT prompting. Notably, few-shot learning was taken as a given for tackling such difficult tasks, and the zero-shot baseline performances were not even reported in the original work [Wei et al., 2022]. To differentiate it from our method, we call Wei et al. [2022] as Few-shot-CoT in this work.\\n\\n# 3 Zero-shot Chain of Thought\\nWe propose Zero-shot-CoT, a zero-shot template-based prompting for chain of thought reasoning. It differs from the original chain of thought prompting [Wei et al., 2022] as it does not require step-by-step few-shot examples, and it differs from most of the prior template prompting [Liu et al., 2021b] as it is inherently task-agnostic and elicits multi-hop reasoning across a wide range of tasks with a single template. The core idea of our method is simple, as described in Figure 1: add Let’s think step by step , or a a similar text (see Table 5), to extract step-by-step reasoning.\\n\\n# 3.1 Two-stage prompting\\nWhile Zero-shot-CoT is conceptually simple, its subtlety is that it uses prompting twice, as explained in Figure 2. This is due to the fact that the zero-shot baseline (see the bottom-left in Figure 1) already uses prompting in the form of “The answer is”, to extract the answers in correct formats. Few-shot prompting, standard or CoT, avoids needing such answer-extraction prompting by explicitly designing the few-shot example answers to end in such formats (see the top-right in Figure 1). 
In summary, Few-shot-CoT [Wei et al., 2022] requires careful human engineering of a few prompt examples with specific answer formats per task, while Zero-shot-CoT does not require such engineering but requires prompting LLMs twice.  \\n\\n1st prompt: reasoning extraction In this step we first modify the input question xinto a prompt using a simple template “Q: [X] . A: [Z] ”, where [X] is an input slot for xand [T] is an slot for hand-crafted trigger sentence tthat would extract chain of though to answer the question x. For example, if we use “Let’s think step by step” as a trigger sentence, the prompt would be “Q: [X] . A: Let’s think step by step.”. Prompted text $\\\\mathbf{x}^{\\\\prime}$ is then fed into a language model and generate subsequent sentence $\\\\mathbf{z}$ . We can use any decoding strategy, but we used greedy decoding throughout the paper for the simplicity.  \\n\\n  \\nFigure 2: Full pipeline of Zero-shot-CoT as described in $\\\\S\\\\ 3$ : we first use the first “reasoning” prompt to extract a full reasoning path from a language model, and then use the second “answer” prompt to extract the answer in the correct format from the reasoning text.  \\n\\n2nd prompt: answer extraction In the second step, we use generated sentence $\\\\mathbf{z}$ along with prompted sentence $\\\\mathbf{x}^{\\\\prime}$ to extract the final answer from the language model. To be concrete, we simply concatenate three elements as with “ $[\\\\mathtt{X}^{\\\\prime}]$ [Z] [A] ”: $[\\\\mathtt{X}^{\\\\prime}]$ for 1st prompt $\\\\mathbf{x}^{\\\\prime}$ ,[Z] for sentence generated at the first step, and [A] for a trigger sentence to extract answer. The prompt for this step is self-augmented , since the prompt contains the sentence ${\\\\bf z}$ generated by the same language model. In experiment, we use slightly different answer trigger depends on the answer format. 
For example, we use “Therefore, among A through E, the answer is” for multi-choice QA, and “Therefore, the answer (arabic numerals) is” for math problem requiring numerical answer. See Appendix A.5 for the details. Finally, the language model is fed the prompted text as input to generate sentences $\\\\hat{\\\\mathbf{y}}$ and parse the final answer.', 'reference': '[7] Large Language Models Are Zero-Shot Reasoners, NeurIPS, 2022, chunk 1'}, 8: {'id': 8, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. (2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. 
Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. 
(2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. 
(2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. 
(2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[8] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 9: {'id': 9, 'title': 'Use Your INSTINCT: INSTruction Optimization for LLMs Using Neural Bandits Coupled with Transformers', 'content': '# 4.2 I MPROVING ZERO -S HOT CHAIN -OF -T HOUGHT PROMPT\\nChain-of-thought (CoT) reasoning has been found to be an effective technique to boost the performance of LLMs in complex tasks that require multiple steps of reasoning (Wei et al., 2022). The work of Kojima et al. (2022) has discovered that the performance of LLMs in complicated reasoning tasks can be significantly improved by simply prepending the zero-shot CoT instruction \"Let’s think step by step.\" to the questions, which outperforms other manually designed instructions. Here we show that our INSTINCT algorithm can further improve over this zero-shot CoT instruction across multiple tasks in Table 3. We defer our detailed experimental design to App. C.3.  \\n\\n<html><body><table><tr><td>Method</td><td>Dataset</td><td>BestZero-ShotCoTInstruction</td><td>Score</td></tr><tr><td>Kojima et al.(2022) InstructZero INSTINCT (ours)</td><td>GSM8K GSM8K GSM8K</td><td>Let\\'sthinkstepbystep. Let\\'suse theinstructiontosolve theproblem. Let\\'s thinkaboutit.</td><td>0.71797 0.74299 0.74526</td></tr><tr><td>Kojima et al.(2022) InstructZero INSTINCT (ours)</td><td>AQUA-RAT AQUA-RAT AQUA-RAT</td><td>Let\\'s think stepbystep. Let\\'sbreakdowntheproblem. Ihaveanewsolution.</td><td>0.52362 0.54331 0.54724</td></tr><tr><td>Kojima et al.(2022) InstructZero INSTINCT (ours)</td><td>SVAMP SVAMP SVAMP</td><td>Let\\'sthinkstepbystep. Let\\'suse theequation. 
Let\\'suse ourbrains.</td><td>0.7625 0.795 0.81</td></tr></table></body></html>\\n\\nTable 3: The best zero-shot CoT instructions found by different algorithms and their scores.', 'reference': '[9] Use Your INSTINCT: INSTruction Optimization for LLMs Using Neural Bandits Coupled with Transformers, ICML, 2024, chunk 4'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:06\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. 
Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  
\\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). 
We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. (2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[1] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 2: {'id': 2, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. 
,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. 
,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. 
We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. 
For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 3: {'id': 3, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# COMPLEXITY -B ASED PROMPTING FOR MULTI -STEP REASONING\\nYao $\\\\mathbf{F}\\\\mathbf{u}^{\\\\star}$ $\\\\mathbf{\\\\ddot{\\\\rho}}\\\\mathbf{Hao}\\\\ \\\\mathbf{Peng}^{\\\\pmb{\\\\alpha}}$ , Ashish Sabharwal ♣, Peter Clark ♣, Tushar Khot ♣♠University of Edinburgh ♣Allen Institute for AI yao.fu $@$ ed.ac.uk, haop $@$ allenai.org, ashishs $@$ allenai.org, peterc $@$ allenai.org, tushark $@$ allenai.org\\n\\n# A BSTRACT\\nWe study the task of prompting large-scale language models to perform multistep reasoning. Existing work shows that when prompted with a chain of thoughts (CoT), sequences of short sentences describing intermediate reasoning steps towards a final answer, large language models can generate new reasoning chains and predict answers for new inputs. A central question is which reasoning examples make the most effective prompts. In this work, we propose complexitybased prompting, a simple and effective example selection scheme for multi-step reasoning. We show that prompts with higher reasoning complexity , i.e., chains with more reasoning steps, achieve substantially better performance on multistep reasoning tasks over strong baselines. We further extend our complexitybased criteria from prompting (selecting inputs) to decoding (selecting outputs), where we sample multiple reasoning chains from the model, then choose the majority of generated answers from complex reasoning chains (over simple chains). 
When used to prompt GPT-3 and Codex, our approach substantially improves multi-step reasoning accuracy and achieves new state-of-the-art (SOTA) performance on three math benchmarks (GSM8K, MultiArith, and MathQA) and two BigBenchHard tasks (Date Understanding and Penguins), with an average $+5.3$ and up to $+18$ accuracy improvements. Compared with existing example selection schemes like manual tuning or retrieval-based selection, selection based on reasoning complexity is intuitive, easy to implement, and annotation-efficient. Further results demonstrate the robustness of performance gains from complex prompts under format perturbation and distribution shift.', 'reference': '[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 0'}, 4: {'id': 4, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[4] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}, 5: {'id': 5, 'title': 'Successive Prompting for Decomposing Complex Questions', 'content': '# 5 Related Work\\nPrompting methods Prompting was introduced as a way to test the reasoning capabilities of large language models ( Brown et al. ,2020 ). In followup works ( Schick ,2022 ;Chowdhery et al. ,2022 ;Marasovi´c et al. 
,2021 ) prompting techniques have been used as a mechanism to supervise the model decision with few demonstrations as a conditioning context to guide its predictions on an unseen example. Works like Chain-of-Thought reasoning ( Wei et al. ,2022 ;Zelikman et al. ,2022 ) especially focus on compositional questions where they provide a chain of reasoning as demonstrations. In concurrent work, Least-to-Most prompting ( Zhou et al. ,2022 ) takes a similar view as ours to break down the problem into sub-problems. However, in Successive Prompting the question decomposition and answering stages are interleaved, unlike Least-toMost where the problem is first reduced into subproblem and then executed in a sequence. In our method, the next question prediction has access to previously answered sub-questions, which is useful in questions that need long chain referencing. Other contemporaneous works ( Press et al. ,2022 ;Khot et al. ,2022 ) use very large language models (more than twice the size we used) and show better few-shot generalization. Works like Perez et al. (2021 ) have shown the importance of having the right in-context examples for downstream performance leading to works that learn to retrieve relevant in-context examples ( Rubin et al. ,2021 ).  \\n\\nNon-symbolic methods Most non-symbolic methods are sequence-to-sequence models trained on a large amount of question answering data ( Khashabi et al. ,2020 ;Yoran et al. ,2021 ).  \\n\\nSymbolic methods Neural module networks like approaches parse complex questions into a prespecified grammar and learn neural components to handle symbolic mathematical operations ( Gupta et al. ,2020 ;Chen et al. ,2020 ;Nye et al. ,2021 )which are recursively executed. State-of-the-art models on DROP, however, use a combination of BERT-based contextual models along with a calculator that performs discrete operations ( Andor et al. ,2019 ;Segal et al. ,2020 ;Hu et al. ,2019 ). Works like Text Modular networks ( Khot et al. 
,2021 ) and MRKL ( Karpas et al. ,2022 ) are closest to our work. However, they are limited in the terms of types of simple questions they can answer (single-span only) and the complexity of reasoning they can do (single-order only). TMNs, additionally, use a classifier that scores the generated chains module and filters out incorrect question decompositions, while we use contrastive estimation to learn a better question decomposer and as a result do not need a chain scorer.\\n\\n# 6 Conclusion\\nWe present a way to successively decompose complex questions into simple QA pairs, which allows for modular QD and QA systems that can be trained and queried independently. When performing in-context learning, we showed that successive prompting yields an improvement of 4.6 F1 over chain-of-thought prompting. When replacing just the in-context QA module with a fine-tuned one, which is adept at handling list type questions, we further improve the overall performance by 9.5 F1. We believe that modular systems that decompose and delegate tasks to the most appropriate model, whether that is a large LM or a tailored component, are more e ffective at solving complex tasks than trying to have a large LM solve the entire task on its own. Successive prompting shows one way this decomposition and delegation can be done.\\n\\n# Acknowledgements\\nWe would like to thank Anthony Chen, Catarina Belem and the anonymous reviewers for the discussions and feedback. This material is based upon work sponsored in part by the DARPA MCS program under Contract No. N660011924033 with the United States O ffice Of Naval Research, in part by funding by AI2 and NSF IIS-1817183. We would also like to thank Hasso Plattner Institute(HPI) for supporting the first author through UCI-HPI fellowship. 
The views in this work are of authors and not the sponsors.\\n\\n# Limitations\\nWe propose a way to decompose complex questions into interpretable simple QA pairs as latent steps that get successively asked and answered by large pretrained models. The notion of performing complex tasks by iteratively finding and then filling information needs is very general, but we have only shown the applicability of one specific version of this idea in one specific setting. There are many potential challenges in applying successive prompting more broadly. The biggest is that it requires at least some decomposition data, which may be hard or even impossible to obtain. Some complex questions are not easily decomposed, and some domains can be very challenging to write synthetic data generators for. We were able to generate synthetic data that covered most of the reasoning types in DROP, but other kinds of complex questions would not be covered by our generator (e.g., questions that require commonsense or causal reasoning).  \\n\\nThere is also significant di fficulty in choosing a level of granularity for decomposition. If a large pretrained model can directly answer a question as complex as “What was Barth’s second field goal?”, we should let the model answer the question instead of trying to decompose it further. The right granularity for the decomposition thus depends on the capabilities of the underlying model, and those capabilities are rapidly changing as newer and larger pretrained models are released. There is the possibility that newer model iterations will not need any decomposition to answer the complex questions covered by our synthetic data generator, making that generator obsolete. 
However, it seems unlikely that pretrained models will be able to handle all complex scenarios in the near future, so the ideas of successive prompting and generating synthetic data to bridge reasoning gaps should still be applicable even when our particular application of them becomes obsolete.', 'reference': '[5] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6'}, 6: {'id': 6, 'title': 'Guiding Large Language Models Via Directional Stimulus Prompting.', 'content': '# Guiding Large Language Models via Directional Stimulus Prompting\\nZekun $\\\\mathbf{Li}^{1\\\\ast}$ ∗, Baolin Peng 2 , Pengcheng $\\\\mathbf{H}\\\\mathbf{e}^{2}$ , Michel Galley 2 , Jianfeng $\\\\mathbf{Gao}^{2}$ †, Xifeng Yan 1 †University of California, Santa Barbara 1 Microsoft 2 {zekunli, xyan}@cs.ucsb.edu {bapeng,penhe,mgalley,jfgao}@microsoft.com\\n\\n# Abstract\\nWe introduce Directional Stimulus Prompting , a novel framework for guiding black-box large language models (LLMs) toward specific desired outputs. Instead of directly adjusting LLMs, our method employs a small tunable policy model (e.g., T5) to generate an auxiliary directional stimulus prompt for each input instance. These directional stimulus prompts act as nuanced, instance-specific hints and clues to guide LLMs in generating desired outcomes, such as including specific keywords in the generated summary. Our approach sidesteps the challenges of direct LLM tuning by optimizing the policy model to explore directional stimulus prompts that align LLMs with desired behaviors. The policy model can be optimized through 1) supervised fine-tuning using labeled data and 2) reinforcement learning from offline or online rewards based on the LLM’s output. We assess our method across summarization, dialogue response generation, and chain-of-thought reasoning tasks. 
Our experiments demonstrate that the framework consistently improves LLMs’ (e.g., ChatGPT, Codex, InstructGPT) performance on these supervised tasks using minimal labeled data. Notably, using just 80 dialogues on the MultiWOZ dataset, our approach enhances ChatGPT’s performance by an impressive $41.4\\\\%$ , matching or surpassing some fully supervised start-of-the-art models. Additionally, the instance-specific chain-of-thought prompt generated by our approach improves InstructGPT’s reasoning accuracy compared to human-crafted or automatically generated prompts. The code and data are publicly available.', 'reference': '[6] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 0'}, 7: {'id': 7, 'title': 'Empower Nested Boolean Logic Via Self-Supervised Curriculum Learning', 'content': '# 8 Conclusion\\nThis paper provides a quantified analysis on the multi-nested boolean logic. We flag the deficiency in the state-of-the-art language models in terms of such basic capability, which will inevitably cause pitfalls in dealing with more complex reasoning tasks. For this, we propose Curriculum Logical Reasoning , a new self-supervised learning method to empower language models with foundational logical capability. We also show that our idea can act as a cornerstone learning method for general logical reasoning.\\n\\n# Limitations\\nWe cannot exhaust all the arrangements of curriculum to perform C LR , which could potentially achieve even better performances. We have discussed the potential risk of chain-of-though as secondary contribution of our work, which will be interesting to study in the future. Our method to introduce nested boolean logic is general, while our experiments are based on one source. Another option is to collect data from more general corpus or specific domains of interest, which is promising. 
Eventually, we do not have enough resources to run large language models above 7b.', 'reference': '[7] Empower Nested Boolean Logic Via Self-Supervised Curriculum Learning, EMNLP, 2023, chunk 5'}, 8: {'id': 8, 'title': 'Semantic Residual Prompts for Continual Learning', 'content': '# 5 Conclusion\\nThis paper presents STAR-Prompt, a prompting method for Continual Learning based on three main novelties. First, we strengthen the stability of the prompt selection mechanism using a foundation model and two levels of prompt tuning. Second, we replace standard prompt token concatenation with additive residuals, which transfer semantics into its MLP layers. Finally, we use a simple generative replay based on a multi-modal representation of the feature distributions. Each part of STAR-Prompt brings a significant contribution, leading it to outperform the CL state of the art.', 'reference': '[8] Semantic Residual Prompts for Continual Learning, ECCV, 2024, chunk 7'}, 9: {'id': 9, 'title': 'Boosting the Power of Small Multimodal Reasoning Models to Match Larger Models with Self-Consistency Training', 'content': '# Boosting the Power of Small Multimodal Reasoning Models to Match Larger Models with Self-Consistency Training\\nCheng Tan 1 ,,∗, Jingxuan $\\\\mathrm{Wei^{2*}}$ , Zhangyang Gao 1 ,,∗, Linzhuang Sun 2 ,Siyuan $\\\\mathrm{Li^{1,3,4}}$ , Xihong Yang 3 ,, Stan Z. Li 3 ,1 Zhejiang University, 2 Shenyang Institute of Computing Technology, Chinese Academy of Sciences 3 AI Lab, Research Center for Industries of the Future, Westlake University 4 Institute of Advanced Technology, Westlake Institute for Advanced Study  \\n\\n\\\\* {tancheng,gaozhangyang,wulirong,xuyongjie,xiajun,lisiyuan,stan.zq.li }@westlake.edu.cn\\n\\n# Abstract\\nMultimodal reasoning is a challenging task that requires models to reason across multiple modalities to answer questions. 
Existing approaches have made progress by incorporating language and visual modalities into a two-stage reasoning framework, separating rationale generation from answer inference. However, these approaches often fall short due to the inadequate quality of the generated rationales. In this work, we delve into the importance of rationales in model reasoning. We observe that when rationales are completely accurate, the model’s accuracy significantly improves, highlighting the need for high-quality rationale generation. Motivated by this, we propose MC-CoT, a selfconsistency training strategy that generates multiple rationales and answers, subsequently selecting the most accurate through a voting process. This approach not only enhances the quality of generated rationales but also leads to more accurate and robust answers. Through extensive experiments, we demonstrate that our approach significantly improves model performance across various benchmarks. Remarkably, we show that even smaller base models, when equipped with our proposed approach, can achieve results comparable to those of larger models, illustrating the potential of our approach in harnessing the power of rationales for improved multimodal reasoning. The code is available at github.com/chengtan9907/mc-cot .\\n\\n# 1. Introduction\\nRecent advances in large language models [ 2 ,4 ,8 ,14 ,15 ,43 ,44 ,47 ,53 ,54 ,68 ] have led to the exploration of Chain-of-Thought (CoT) prompting [ 6 ,56 ,57 ,62 ]. This approach, which directs the model to systematically unravel rationales before providing answers, rather than responding directly, has showcased the model’s impressive efficacy across a variety of natural language processing (NLP) tasks. Moreover, the advent of CoT prompting has catalyzed a plethora of research endeavors delving into the reasoning prowess of large language models. 
A diverse range of Chain-of-Thought strategies has been investigated, including the voting-facilitated CoT-SC [ 56 ], the transition from chain-like to tree-like thinking with Tree-of-Thoughts [ 62 ], and further expansion into graph-structured thinking [ 6 ].  \\n\\n  \\nFigure 1. An example of multimodal reasoning that answers the question by reasoning across both vision and language modalities.  \\n\\nWhile CoT reasoning has been thoroughly established in the realm of language models, its foray into the vast and intricate landscape of multimodal reasoning is still in its infancy. As shown in Figure 1 , Multimodal reasoning [ 5 ,11 ,17 ,19 ,20 ,23 ,31 –34 ,37 ,39 ,40 ,50 ,51 ,59 ,63 ,66 ], which inherently involves the seamless fusion of information from disparate modalities such as text and images, presents unique challenges. The process of extracting, correlating, and generating rationales across multiple modalities is decidedly more complex than the tasks encountered in a solely text-based modality. A recent seminal work, Multimodal-CoT [ 69 ], has pioneered the application of the Chain-of-Thought approach to multimodal reasoning tasks. This approach encompasses a two-stage framework that distinctively separates rationale generation from answer inference. By obliging the model to generate rationales prior to answering questions, Multimodal-CoT mirrors the language-only CoT prompting strategy, thus enabling the model to reason across multiple modalities.  \\n\\nDespite Multimodal-CoT has made promising strides in the realm of multimodal reasoning, as evidenced in Figure 2 , its improvements over the no-rationale baseline are still limited. Moreover, compared to the ground-truth rationale, the predicted rationale falls short, often yielding results that lack relevance to the posed question. 
This discrepancy primarily stems from the quality of the generated rationales, highlighting the crucial role of rationale quality in the success of the chain-of-thought reasoning process.  \\n\\n  \\nFigure 2. The comparison of answer accuracy on ScienceQA using the Multimodal-CoT framework with no rationale, predicted rationales, and ground-truth rationales.  \\n\\nIn light of the above observations, it becomes evident that the potency of the Chain-of-Thought reasoning in a multimodal scenario is intrinsically tethered to the accuracy of the rationales generated. A high-quality rationale not only sheds light on the model’s thought process but also paves the way for more precise answers. This leads to a critical question: how can we develop a strategy to enhance the quality of these rationales, and consequently, improve the overall performance of multimodal CoT?  \\n\\nOur study is driven by the hypothesis that enhancing the quality of rationale generation can significantly improve the model’s reasoning capabilities and overall performance. To this end, we introduce a simple yet effective strategy that capitalizes on the inherent variability of deep neural models during training, particularly stemming from mechanisms like dropout. Our approach involves having the model generate multiple rationales and then voting for the most consistent words across these rationales to yield a more refined and accurate rationale. The same voting mechanism is then applied to answer generation, further boosting the model’s confidence in its predictions. It is important to note that the inference phase remains completely unaffected by this voting mechanism and continues to operate in the same manner as it does in the original Multimodal CoT framework. Through this approach, we aim to facilitate a more robust and accurate multimodal reasoning ability.  
\\n\\nExtensive experiments across ScienceQA [ 37 ] and AOKVQA [ 49 ] benchmark datasets demonstrate the efficacy of our proposed approach. Notably, by improving the rationale quality, even smaller models equipped with our strategy manifest performance metrics that rival, and at times surpass, those of considerably larger models. This not only confirms the efficacy of our rationale refinement strategy but also opens up a promising avenue where smaller, more efficient models can be rendered competitive in the multimodal reasoning landscape.', 'reference': '[9] Boosting the Power of Small Multimodal Reasoning Models to Match Larger Models with Self-Consistency Training, ECCV, 2024, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. 
,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. 
,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. 
reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. 
We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[1] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 2: {'id': 2, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. 
(2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. 
With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. 
For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 3: {'id': 3, 'title': 'Successive Prompting for Decomposing Complex Questions', 'content': '# 5 Related Work\\nPrompting methods Prompting was introduced as a way to test the reasoning capabilities of large language models ( Brown et al. ,2020 ). In followup works ( Schick ,2022 ;Chowdhery et al. ,2022 ;Marasovi´c et al. ,2021 ) prompting techniques have been used as a mechanism to supervise the model decision with few demonstrations as a conditioning context to guide its predictions on an unseen example. Works like Chain-of-Thought reasoning ( Wei et al. ,2022 ;Zelikman et al. ,2022 ) especially focus on compositional questions where they provide a chain of reasoning as demonstrations. In concurrent work, Least-to-Most prompting ( Zhou et al. ,2022 ) takes a similar view as ours to break down the problem into sub-problems. However, in Successive Prompting the question decomposition and answering stages are interleaved, unlike Least-toMost where the problem is first reduced into subproblem and then executed in a sequence. In our method, the next question prediction has access to previously answered sub-questions, which is useful in questions that need long chain referencing. Other contemporaneous works ( Press et al. ,2022 ;Khot et al. ,2022 ) use very large language models (more than twice the size we used) and show better few-shot generalization. Works like Perez et al. (2021 ) have shown the importance of having the right in-context examples for downstream performance leading to works that learn to retrieve relevant in-context examples ( Rubin et al. ,2021 ).  
\\n\\nNon-symbolic methods Most non-symbolic methods are sequence-to-sequence models trained on a large amount of question answering data ( Khashabi et al. ,2020 ;Yoran et al. ,2021 ).  \\n\\nSymbolic methods Neural module networks like approaches parse complex questions into a prespecified grammar and learn neural components to handle symbolic mathematical operations ( Gupta et al. ,2020 ;Chen et al. ,2020 ;Nye et al. ,2021 )which are recursively executed. State-of-the-art models on DROP, however, use a combination of BERT-based contextual models along with a calculator that performs discrete operations ( Andor et al. ,2019 ;Segal et al. ,2020 ;Hu et al. ,2019 ). Works like Text Modular networks ( Khot et al. ,2021 ) and MRKL ( Karpas et al. ,2022 ) are closest to our work. However, they are limited in the terms of types of simple questions they can answer (single-span only) and the complexity of reasoning they can do (single-order only). TMNs, additionally, use a classifier that scores the generated chains module and filters out incorrect question decompositions, while we use contrastive estimation to learn a better question decomposer and as a result do not need a chain scorer.\\n\\n# 6 Conclusion\\nWe present a way to successively decompose complex questions into simple QA pairs, which allows for modular QD and QA systems that can be trained and queried independently. When performing in-context learning, we showed that successive prompting yields an improvement of 4.6 F1 over chain-of-thought prompting. When replacing just the in-context QA module with a fine-tuned one, which is adept at handling list type questions, we further improve the overall performance by 9.5 F1. We believe that modular systems that decompose and delegate tasks to the most appropriate model, whether that is a large LM or a tailored component, are more e ffective at solving complex tasks than trying to have a large LM solve the entire task on its own. 
Successive prompting shows one way this decomposition and delegation can be done.\\n\\n# Acknowledgements\\nWe would like to thank Anthony Chen, Catarina Belem and the anonymous reviewers for the discussions and feedback. This material is based upon work sponsored in part by the DARPA MCS program under Contract No. N660011924033 with the United States O ffice Of Naval Research, in part by funding by AI2 and NSF IIS-1817183. We would also like to thank Hasso Plattner Institute(HPI) for supporting the first author through UCI-HPI fellowship. The views in this work are of authors and not the sponsors.\\n\\n# Limitations\\nWe propose a way to decompose complex questions into interpretable simple QA pairs as latent steps that get successively asked and answered by large pretrained models. The notion of performing complex tasks by iteratively finding and then filling information needs is very general, but we have only shown the applicability of one specific version of this idea in one specific setting. There are many potential challenges in applying successive prompting more broadly. The biggest is that it requires at least some decomposition data, which may be hard or even impossible to obtain. Some complex questions are not easily decomposed, and some domains can be very challenging to write synthetic data generators for. We were able to generate synthetic data that covered most of the reasoning types in DROP, but other kinds of complex questions would not be covered by our generator (e.g., questions that require commonsense or causal reasoning).  \\n\\nThere is also significant di fficulty in choosing a level of granularity for decomposition. If a large pretrained model can directly answer a question as complex as “What was Barth’s second field goal?”, we should let the model answer the question instead of trying to decompose it further. 
The right granularity for the decomposition thus depends on the capabilities of the underlying model, and those capabilities are rapidly changing as newer and larger pretrained models are released. There is the possibility that newer model iterations will not need any decomposition to answer the complex questions covered by our synthetic data generator, making that generator obsolete. However, it seems unlikely that pretrained models will be able to handle all complex scenarios in the near future, so the ideas of successive prompting and generating synthetic data to bridge reasoning gaps should still be applicable even when our particular application of them becomes obsolete.', 'reference': '[3] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6'}, 4: {'id': 4, 'title': 'ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness', 'content': '# Acknowledgements\\nWe thank the reviewers and the area chairs for their helpful comments. We also thank Peter Hase, Prateek Yadav, and Shiyue Zhang for their feedback. This work was supported by NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, and a Google Ph.D. Fellowship. The views contained in this article are those of the authors and not of the funding agency.\\n\\n# Limitations\\nAn interesting assumption for future work to address is that all knowledge typically needed to evaluate the correctness of a reasoning step is explicitly present as part of the input or the intermediate reasoning steps. In scenarios where correctness depends on implicit knowledge, we rely on the choice of underlying models (described in Appendix A )which are built on top of pre-trained LMs and are known to capture a lot of background knowledge ( Petroni et al. ,2019 ;Roberts et al. ,2020 ). However, inferences that rely on substantial implicit knowledge may not be best evaluated through current metrics. 
While current evaluation frameworks focus on evaluating the quality of modelgenerated reasoning chains, Wei et al. (2022 ) note that the chain itself may not faithfully reflect the internal reasoning process of the model. This remains an open question for future work to address.', 'reference': '[4] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7'}, 5: {'id': 5, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. 
He gets two more each from his mom and dad. How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  \\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  
\\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. ,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  \\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. 
For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. 
She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[5] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 6: {'id': 6, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. 
Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[6] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}, 7: {'id': 7, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. 
Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  
\\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). 
We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. (2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[7] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 8: {'id': 8, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. 
(2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. 
While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. (2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. 
EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. (2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. 
This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. (2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[8] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 9: {'id': 9, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  
\\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. 
,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. (2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. 
In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. For each compared method, we consider both iterative and non-iterative settings. Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[9] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. 
,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. 
,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. 
We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 2: {'id': 2, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. 
We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[2] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}, 3: {'id': 3, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 2 RELATED WORK\\nEmergent Abilities and Multi-Step Reasoning With the recent trend in scaling language models (Brown et al., 2020; Chowdhery et al., 2022), a central question is what unique abilities emerge as models become large (Kaplan et al., 2020; Wei et al., 2022a). Generally, the ability to follow the format of given prompts (typically few-shot) thus solving the corresponding tasks (also referred as in-context learning), is something that large language models are particularly skilled at (Shin et al., 2020; Liu et al., 2021). Among the wide language understanding task spectrum, we are particularly interested in multi-step reasoning because of its two uniqueness: (1). multistep reasoning is a task where large models substantially outperform smaller models (Wei et al., 2022b), versus performance gains on tasks like sentiment classification can be very limited with large models (Shin et al., 2020); (2). multi-step reasoning is where few-shot prompting starts to outperform full training set fine-tuning, even when fine-tuning is conducted on the same large model (Lewkowycz et al., 2022). This work takes an important step forward in multi-step reasoning by showing the critical role of prompt complexity.  \\n\\nChain-of-Thoughts Reasoning A prominent work demonstrating the multi-step reasoning of language models is chain-of-thoughts prompting (Fig. 1A), proposed by Wei et al. (2022b). They show that the reasoning ability can only be elicited by chain of thoughts, but not standard prompting where an answer directly follows a question without intermediate reasoning steps. 
Further works show that CoT can be improved by self-consistency (Wang et al., 2022b), pretraining the model with latex-formated data (Lewkowycz et al., 2022), context selection (Creswell et al., 2022), or even adding certain magic phrases like “Let’s think step by step” (Kojima et al., 2022). The original CoT paper (Wei et al., 2022b) uses 8 manually written examples as the prompt, which are reused by most follow-up works. Our work sits in the context of CoT reasoning, and propose a new complexitybased prompt selection that substantially outperforms the original CoT.  \\n\\nExample Selection for Prompting Designing prompts can be challenging due to the instability, as multiple works have shown the performance is sensitive to prompt, task, dataset, and model changes (Zhao et al., 2021; Lu et al., 2022; Su et al., 2022). Despite works on automatic prompt searching (which is more suitable for smaller models, e.g., Shin et al., 2020; Li & Liang, 2021), currently, prompt engineering for large models is (still) a community-wide collective trial and error effort (there is even a prompt marketplace named PromptBase). The difficulty is that it is extremely hard to extract generalizable regularity from empirical observations that can form effective selection criteria . One notable exception is similarity-based prompt selection, which retrieves the most similar training instances as the prompt for a given test case (Rubin et al., 2022). Yet for CoT prompting, retrieving different prompts for different test cases requires reasoning chain annotations for the whole training set, which compromises the advantage of being few-shot. Given this background, our core contribution is identifying complexity as an effective and robust selection criterion and in many cases, it outperforms existing prompt selection schemes while being annotation-efficient.  
\\n\\nRelation to Classical Semantic Parsing The procedure of chain of thoughts prompting is conceptually similar to classical semantic parsing where one generates a logical form then executes it upon a knowledge base to reach a final answer (Liang, 2016; Cheng et al., 2019). The practice of sampling then voting is also similar to marginalizing out semantic parses (Yin et al., 2018). There are further works linking the relationship between in-context learning and classical Bayesian inference (Wei et al., 2021; Xie et al., 2022). From our perspective, we tend to view chain-ofthoughts as flexible, language model styled “logical forms” which are “executed” by the language model itself. We leave further study on connecting classical parsing and CoT to future work.\\n\\n# 3 COMPLEXITY -BASED PROMPTING\\nWe study multi-step reasoning tasks, and use math word problems, mathematical problems expressed in natural language, as our testbed. This task, as is measured by solve rate (accuracy), is to predict the answer (typically a number) of a given math word problem via intermediate steps. We follow the chain-of-thoughts prompting framework and compare all prompting schemes using GPT-3 text-davinci-002 and Codex code-davinci-002 . An example problem, as well as the chain-of-thoughts workflow, is shown in Fig. 1A. The input is a stack of a few (often 8) CoT cases followed by a test question, then the language model continues generating an output CoT for the test question. Our goal is to improve the reasoning accuracy by identifying and exploiting more effective input and output reasoning chains.\\n\\n# 3.1 SELECTING COMPLEX SAMPLES AS PROMPTS\\nOur method is to simply choose complex prompts over simple ones. We hypothesize that language models’ reasoning performance will increase if we use complex instances as in-context “training example,” as they intuitively subsume simpler instances (Richardson & Sabharwal, 2022). 
We define complex instances as instances with more reasoning steps (Fig. 1B), as the name “multistep reasoning” indicates. Note that using reasoning steps as the notion of complexity is also the practice of previous works like (Sugawara et al., 2018; Lai et al., 2021). We further define a step as a line, separated by the linebreak “ \\\\n ”.  \\n\\nThere are two aspects that need more discussion: (1) The notion of complexity. There are other complexity indicators than number of steps, such as questions lengths or the length of the underlying formula for solving a given problem. We will show that the trend that better performance comes with more complex prompts is consistent across various complexity indicators, such as question lengths and formula lengths . Consequently, for datasets that do not have annotated reasoning chains, we can use questions lengths to identify complex instances, then only annotate the identified few-shot instances, thus reducing the annotation cost. (2) Confounders of number of steps. The increase in performance with more complex examples in the prompt could be explained by correlated factors like the increase in the total number of reasoning steps in the prompts or just the increased length of the prompt. To account for this, we evaluate prompts with simpler examples but the same number of reasoning steps (e.g. 24 cases with 3 steps vs. 8 cases with 9 steps, both of 72 steps in total). We also consider prompts of the longest lengths (but not most steps). 
We show that the number of steps per example is the most prominent source of performance gains over confounders.', 'reference': '[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2'}, 4: {'id': 4, 'title': 'Language Models are Multilingual Chain-of-Thought Reasoners', 'content': '# 3 MULTILINGUAL CHAIN -OF -T HOUGHT PROMPTING\\nWe provide an overview of standard prompting and chain-of-thought prompting, as well as their extensions to the multilingual setting, which we illustrate in Table 1 and use in our experiments (§ 4 ).  \\n\\nIn standard prompting, given a prompt in the source language, the model is asked to predict the answer ( Brown et al. ,2020 ;Schick & Schütze ,2021 ). This can be done in a zero-shot or few-shot setting by providing exemplars following the same template as additional input to the model. We refer to this setting as direct answer prediction (D IRECT )as the model directly predicts the answer to the problem. This setting measures the model’s ability to solve problems without any intermediate reasoning steps.  \\n\\nChain-of-thought (C OT; Wei et al. ,2022b ) prompting helps improve many few-shot reasoning tasks, by augmenting few-shot examples with intermediate reasoning steps that should be predicted by the model. In the multilingual setting, we can apply CoT to solve the problem in the native language (N ATIVE -C OT) by predicting the reasoning steps in the original language of the problem. This measures the model’s ability to both understand and solve the problem in a specific language.  \\n\\nAlternatively, we can ask the model to predict the chain of thought in English (EN-C OT) , regardless of the problem language. Such an approach may be useful as English is often used as the source language for cross-lingual transfer ( Hu et al. ,2020 ) and has been found effective when used as the prompt language ( Zhao & Schütze ,2021 ;Winata et al. ,2021 ;Lin et al. ,2021b ).  
\\n\\nFinally, we can translate the problem to English and solve it with English CoT (T RANSLATE -EN) . In this setting, we use the Google Translate API to translate problems into English. This mirrors the translate-train setup ( Hu et al. ,2020 ;Xue et al. ,2021 ;Ruder et al. ,2021 ), the best-performing setting for fine-tuning multilingual models where the training data is translated to English.  \\n\\n<html><body><table><tr><td></td><td>DIRECT</td><td>NATIVE-COT</td><td>EN-CoT</td><td>TRANSLATE-EN</td></tr><tr><td>NATIVE-EXEMPLARS</td><td></td><td></td><td></td><td></td></tr><tr><td>ENGLISH-EXEMPLARS</td><td></td><td>N/A</td><td></td><td>N/A</td></tr><tr><td>MULTILINGUAL-EXEMPLARS</td><td></td><td></td><td></td><td>N/A</td></tr></table></body></html>  \\n\\n  \\nTable 2: Possible combinations between few-shot exemplar selection and solution strategies.   \\nFigure 3: The chain-of-thought prompts and example model outputs in the MGSM experiments. The solutions are written in the same language as the questions of interest (N ATIVE -C OT).  \\n\\nBeyond the prompting methods, there are different ways to provide few-shot examples in context for multilingual prompting:  \\n\\n•All native question exemplars (N ATIVE -E XEMPLARS ). We use a few in-language questions together with their solutions as the few-shot prompt exemplars. This is the most natural setting when we have a few examples in each investigated language.   \\n•All English question exemplars (E NGLISH -E XEMPLARS ). When we are unable to access any existing questions or solution examples in some languages, an intuitive way is to use English questions and solutions as exemplars to perform zero-shot cross-lingual transfer. Note that it is unrealistic to combine this exemplar selection setting with N ATIVE -C OT, since we assume no access to the native language for prompting.   \\n•Generic multilingual question exemplars (M ULTILINGUAL -E XEMPLARS ). 
Similar to ENGLISH -E XEMPLARS , we assume access to questions and solutions in a few languages, and test if multilingual exemplars better elicit the multilingual reasoning ability of models.  \\n\\nFor T RANSLATE -EN, as all exemplar questions and solutions are in English, we only experiment with the translated native question exemplars and English CoT. We summarize the combinations of prompting and exemplar methods in Table 2 , and present an illustration in Figure 3 . Detailed prompting input for each investigated combination can be found in Appendix A.2 .\\n\\n# 4 EXPERIMENTS ON MGSM\\nIn this section, we evaluate the multilingual reasoning abilities of two representative state-of-the-art pretrained large language models—GPT-3 ( Brown et al. ,2020 ) and PaLM ( Chowdhery et al. ,2022 )—on our MGSM benchmark in various prompting settings using exemplars in the source language Table 3: Accuracy $(\\\\%)$ on MGSM of different models and languages with exemplar questions in native languages (N ATIVE -E XEMPLARS ). HRL : average performance across high-resource languages with larger than $0.1\\\\%$ frequency in the training corpora; URL : average performance across underrepresented languages. We use 6 questions and solutions as the few-shot exemplar whenever possible: while the token number for 6-shot prompts in some languages may exceed the token number limit of GPT-3, we use the maximum possible number of exemplars instead for these cases. Detailed numbers of exemplars for each language in GPT-3 experiments can be found in Appendix A.1 . The best numbers in each column are in boldface .  \\n\\n<html><body><table><tr><td></td><td>AVG</td><td>HRL</td><td>URL</td><td>EN</td><td>DE</td><td>FR</td><td>ES</td><td>RU</td><td>ZH</td><td>JA</td><td>TH</td><td>TE</td><td>BN</td><td>SW</td></tr><tr><td>Lang. Freq. 
(PaLM, %)</td><td></td><td>一</td><td></td><td>78.0</td><td>3.5</td><td>3.3</td><td>2.1</td><td>.53</td><td>.40</td><td>.38</td><td>.04</td><td>.02</td><td>.006</td><td>.005</td></tr><tr><td colspan=\"10\">GPT-3(text-davinci-002)</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>DIRECT</td><td>11.7</td><td>15.1</td><td>5.7</td><td>16.0</td><td>14.8</td><td>16.8</td><td>17.2</td><td>12.418.0</td><td></td><td>11.2</td><td>8.8</td><td>0.8</td><td>4.4</td><td>8.8</td></tr><tr><td>NATIVE-COT</td><td>26.4</td><td>34.7</td><td>7.2</td><td>53.6</td><td>36.0</td><td>37.6</td><td>40.4</td><td>28.4</td><td>40.0</td><td>26.0</td><td>10.8</td><td>0.4</td><td>6.4</td><td>11.2</td></tr><tr><td>EN-CoT</td><td>31.6</td><td>39.4</td><td>13.9</td><td>53.6</td><td>44.0</td><td>46.0</td><td>44.8</td><td>28.4</td><td>40.8</td><td>32.4</td><td>19.6</td><td>5.6</td><td>9.6</td><td>20.8</td></tr><tr><td>TRANSLATE-EN</td><td></td><td>45.6 47.5</td><td>40.7</td><td>53.6</td><td>46.4</td><td></td><td>46.4 51.6 48.8 47.2</td><td></td><td></td><td>44.8</td><td>41.2</td><td>42.8</td><td>41.2</td><td>37.6</td></tr><tr><td colspan=\"10\">PaLM-540B</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>DIRECT</td><td>18.6</td><td>19.3</td><td>16.8</td><td>22.0</td><td>18.8</td><td>19.6</td><td>20.0</td><td>22.0</td><td>19.2</td><td>16.0</td><td>16.8</td><td>17.6</td><td>17.2</td><td>15.6</td></tr><tr><td>NATIVE-COT</td><td>48.1</td><td>47.9</td><td>44.9</td><td>62.4</td><td>49.2</td><td>46.4</td><td>56.8</td><td>48.4</td><td>46.8</td><td>40.0</td><td>52.8</td><td>45.6</td><td>46.0</td><td>35.2</td></tr><tr><td>EN-CoT</td><td>51.3</td><td>52.3</td><td>46.8</td><td>62.4</td><td>53.6</td><td>51.2</td><td>58.0</td><td>55.6</td><td>46.0</td><td>49.6</td><td>49.6</td><td>46.8</td><td>46.4</td><td>44.4</td></tr><tr><td>TRANSLATE-EN</td><td>55.0</td><td></td><td>56.3 51.2</td><td>62.4</td><td>57.2</td><td>55.2</td><td>60.0</td><td></td><td>59.6 
55.6</td><td>50.0</td><td>50.8</td><td></td><td></td><td>49.653.2 51.2</td></tr></table></body></html>  \\n\\n(N ATIVE -E XEMPLARS ). Throughout this paper, we generate outputs using greedy decoding (i.e., sampling with temperature $\\\\tau=0$ ).', 'reference': '[4] Language Models are Multilingual Chain-of-Thought Reasoners, ICLR, 2023, chunk 1'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 5 Related Work\\nMemorization and Reasoning in PLMs. With the recent success of large-scale pre-trained language models (PLMs), there has been growing interest in investigating what is captured by these PLMs during pre-training ( Talmor et al. ,2020a ;Rogers et al. ,2020 ;Kassner et al. ,2020 ). Studies have shown that in addition to learning linguistic knowledge about language use, PLMs are capable of memorizing a great amount of world knowledge ( Rogers et al. ,2020 ), yielding competitive performance on knowledge probing ( Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ) and other knowledge-intensive tasks such as question answering ( Roberts et al. ,2020 ) and fact checking (Lee et al. ,2020 ), without resorting to any external knowledge source. On the other hand, other work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in recalling their stored knowledge for multi-step inferences, such as answering complex, multi-hop  \\n\\nFor existing work on learning-based prompting, (Shin et al. ,2020 ) proposes to use gradient-guided search to find appropriate discrete prompt tokens in the PLM’s vocabulary to form prompt templates. While the resulting prompts are readable, most of them have very low fluency and interpretability. (Zhong et al. ,2021 ;Qin and Eisner ,2021 ) propose to optimize the prompts in continuous space instead, which shows large benefits in both effectiveness and optimization efficiency. ( Zhong et al. 
,2021 ) also raises and studies the question about whether learning-based prompting could exploit spurious dataset regularities which would weaken the validity of standard evaluation results, a concern we seriously address in our work. ( Lester et al. ,2021 ;Li and Liang ,2021 ) follow the continuous prompting paradigm, and tune task-level prompts for lightweight adaptation of PLMs. Overall, existing prompt learning methods are either restricted to cases where there exists a single & identifiable relation/predicate within the query ( Zhong et al. ,2021 ;Qin and Eisner ,2021 ;Shin et al. ,2020 ), or being static and not sensitive to sample-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ).  \\n\\nIterative Knowledge Retrieval. We are also inspired by methods that iteratively retrieve knowledge from explicit knowledge sources for multistep reasoning, such as ( Xiong et al. ,2021 ;Qi et al. ,2019 ). Our problem setting could be viewed as iterative retrieval over implicit knowledge in PLMs, instead of from explicit knowledge sources.\\n\\n# 6 Conclusion & Future Work\\nWe explore an iterative prompting framework towards driving a “train of thought” from PLMs for multi-step reasoning tasks. We show the superiority of this iterative scheme, and also effectiveness of our proposed context-aware prompter design, which addresses key limitations of previous prompting methods when applied in this new scheme. In addition, we conduct both quantitative & qualitative analysis on the faithfulness of the learned prompting behaviors. In the future, we aim to further extend and apply our ideas to Language Model pretraining, with the hope that PLMs can be inherently equipped with stronger multi-step reasoning capabilities.\\n\\n# Acknowledgement\\nThe authors would like to thank the OSU NLP group members for their thoughtful comments. 
This research was sponsored in part by Google Faculty Award, NSF IIS-1815674, NSF CAREER #1942980, NSF OAC-2112606, and Ohio Supercomputer Center ( Center ,1987 ).\\n\\n\\n\\n# A Appendix\\n\\n# A.1 Hyperparameters\\nWe set the batch size to be 32, 128, 32 and train for 70, 50, 40 epochs for 2Wiki, LoT & R4C respectively. Table 5 summarizes other hyperparameters used in our experiments.  \\n\\nTable 5: Hyperparameter settings for all compared methods. lr: learning rate, pt_len: prompt length.   \\n\\n\\n<html><body><table><tr><td colspan=\"2\">2Wiki</td><td colspan=\"2\">LoT</td><td colspan=\"2\">R4C</td></tr><tr><td></td><td>lr pt_len</td><td>lr</td><td>pt_len</td><td>lr</td><td>pt_len</td></tr><tr><td>Prompt-T</td><td>8e-3</td><td>80 4e-3</td><td>80</td><td>4e-3</td><td>60</td></tr><tr><td>Prefix-T</td><td>8e-4</td><td>80 4e-4</td><td>60</td><td>4e-4</td><td>80</td></tr><tr><td>PLM-FT</td><td>4e-5</td><td>4e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>PLM-QA</td><td>4e-5</td><td>8e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>Ours</td><td>8e-5</td><td>30</td><td>8e-5 60</td><td>8e-5</td><td>30</td></tr></table></body></html>\\n\\n# A.2 More Examples on Prompter Attention Visualizations\\nFigure 5: Prompter attention visualization. Reasoning type: Comparison.  \\n\\n  \\nFigure 4 ,5 ,6 ,7 show additional example prompter attention visualizations in the 2Wiki dataset, each corresponding to a different reasoning type (composition, comparison, inference & bridge-comparison respectively).   \\nFigure 4: Prompter attention visualization. Reasoning type: Composition.  \\n\\n  \\nFigure 6: Prompter attention visualization. Reasoning type: Inference.  
\\n\\n<html><body><table><tr><td>prompt</td><td>prompt</td><td></td><td></td><td></td><td>prompt</td><td>prompt</td><td></td><td>prompt</td><td>prompt</td><td>Which</td><td></td><td>film</td><td></td><td>prompt Which whose director Is</td></tr><tr><td>Which film whose</td><td></td><td>Which film whose ?</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film who</td><td></td><td>film whose</td><td></td><td></td></tr><tr><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is</td><td></td><td>director is younger</td><td></td><td>drector Is</td><td></td><td>younger</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>younger</td><td></td><td></td><td></td><td>younger</td><td></td><td></td></tr><tr><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh uoo</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh oon</td><td></td><td>Kh oon</td></tr><tr><td>uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>Ka</td><td></td><td>oon Ka</td><td></td><td>oon Ka</td><td></td><td>Ka</td><td></td><td>Ka</td></tr><tr><td>Ka</td><td></td><td>Ka Kh</td><td></td><td>Ka Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td></tr><tr><td>Kh uoo</td><td></td><td>Uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>oon</td><td></td><td>oon</td><td></td><td>oon or</td></tr><tr><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td></td><td>or</td><td>or</td><td></td><td>or</td><td></td><td>Idaho</td></tr><tr><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Transfer</td></tr><tr><td>Transfer 
?</td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transf</td><td></td><td>Transfer</td><td></td><td>Transfer</td><td></td><td>Transfer ?</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>？</td><td></td><td>?</td><td></td><td></td><td>?</td><td>?</td><td></td><td></td><td>?</td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>S</td><td></td><td>S</td><td></td><td></td><td>S</td><td>S</td><td></td><td>S</td><td></td><td>S</td></tr><tr><td></td><td></td><td></td><td></td><td>oh</td><td></td><td>oh</td><td></td><td></td><td>oh</td><td>oh</td><td></td><td>oh</td><td>oh</td></tr><tr><td></td><td></td><td></td><td></td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>Modi</td><td>Modi</td><td></td><td></td><td>Modi</td></tr><tr><td></td><td></td><td></td><td>!pow</td><td></td><td>Modi</td><td></td><td>is</td><td></td><td></td><td>Modli</td><td></td></tr><tr><td></td><td></td><td></td><td>is</td><td></td><td></td><td></td><td></td><td></td><td></td><td>Is</td><td>is</td></tr><tr><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td></td><td>director of</td><td>director of</td><td></td><td>drector</td><td>drector</td></tr><tr><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td></td><td>Kh</td><td>Kh</td><td></td><td>of Kh</td><td>of Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>oon</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>Ka</td><td>Ka</td><td></td><td>Ka</td><td>Ka</td></tr><tr><td></td><td></td><td></td><td>Ka</td><td></td><td>Ka 
Kh</td><td></td><td>Kh</td><td>Kh</td><td></td><td>Kh</td><td>Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>oon</td><td></td><td>uoo</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td></td><td></td><td>Peter</td><td>Peter</td><td></td><td>Peter</td><td>Peter</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>F</td><td>F</td><td></td><td>F</td><td>F</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>onda</td><td>onda</td><td>onda</td><td></td><td>onda</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>is</td><td></td><td>is</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td>director</td><td></td><td>director</td></tr><tr><td>PeterFondaisdirector of IdahoTransfer,</td><td></td><td></td><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td>of</td><td></td><td>of</td></tr><tr><td>2November1897isdateof birthof SohrabModi, February 23, 1940 is date of birth of Peter Fonda]</td><td></td><td></td><td></td><td></td><td></td><td>Idaho</td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td></tr><tr><td>Cq: [ Sohrab Modi is director of Khoon Ka Khoon,</td><td></td><td></td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transfer</td><td>Transfer November 1897 date birth Modi</td><td>2 is of of S oh rab</td><td>Transfer 2 November 1897 date of birth of S oh Modi</td></tr><tr><td>q: Which film whose director is younger, Khoon Ka Khoon or Idaho Transfer?</td><td></td><td></td><td></td><td></td><td></td><td></td></table></body></html>', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 6'}, 6: {'id': 6, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in 
Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. He gets two more each from his mom and dad. How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  
\\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  \\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. ,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  
\\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). 
That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. 
She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[6] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 7: {'id': 7, 'title': 'LLMs Can Find Mathematical Reasoning Mistakes by Pedagogical Chain-of-Thought', 'content': '# 1 Introduction\\nIn recent years, Large Language Models (LLMs) have emerged as the leading force in advancing various Natural Language Processing (NLP) tasks, consistently achieving state-of-the-art performance. Despite the remarkable improvement in general AI abilities, LLMs still suffer from some essential problems, e.g., hallucination, which suggests that current LLMs may generate contents that look reasonable but intrinsically illogical [Huang et al. , 2023b].  \\n\\nTo overcome hallucination, many efforts have been made, among which self-correction is one of the most recently focused directions. It empowers LLMs to correct their outputs by zero- or few-shot prompting [Pan et al. , 2023]. Some researchers treat self-correction as a single-process way [Chen et al. , 2023; Madaan et al. , 2023], while some researchers break down the task into two components, i.e., mistake finding and output correction , and achieve superior performance [Tyen et al. , 2023]. Since finding mistakes is a fundamental and precedent skill for right correction, it is reasonable that the two-step self-correction is more reliable.  \\n\\nMistake finding is a prerequisite of self-correction, while how to well achieve it is still an open challenge. Some studies show that current LLMs struggle to find mistakes reliably using basic prompting strategies [Huang et al. , 2023a; Tyen et al. , 2023]. Most previous benchmark studies mainly adopt simple and general prompting strategies, e.g., Zero-shot CoT and its variants 1 . They do not fully leverage domainrelated knowledge to design prompts. 
Domain-related knowledge is important because there are various kinds of reasoning problems, such as word sorting, tracking shuffled objects, logical deduction and multi-step arithmetic [Srivastava et al. ,2023], which require different kinds of fine-grained reasoning skills. For example, word sorting needs the skills of text and order understanding while multi-step arithmetic needs the skills of calculation and deduction. Hence, without domainrelated knowledge, the potential of LLMs may be underestimated due to inadequately designed prompts.  \\n\\nIn this paper, we investigate the challenge of creating efficient prompts for a specific domain to identify reasoning errors utilizing LLMs. As a starting point, we choose the mathematical domain, a basic subject in education. On the one hand, mathematical questions queries necessitate rigorous reasoning and logic, an area where current LLMs fall short. On the other hand, finding reasoning mistakes in mathematics can provide a foundation for automatic answer grading in terms of pedagogy. To this end, we conduct interdisciplinary research on LLMs prompting strategies by adopting pedagogical knowledge within this work to achieve reliable mathematical mistake finding.  \\n\\n  \\nFigure 1: We develop the principles for prompt design for LLMs by leveraging the educational Bloom Cognitive Model and we focus on the learning ability . The bold parts are keywords used in prompts.  \\n\\nIn the field of pedagogy, the teaching and learning objectives are theoretically consistent with the Bloom Cognitive Model $({\\\\tt B C M})^{2}$ [Bloom et al. , 1984], which breaks the student abilities into six levels from a cognitive perspective, i.e., Remember, Understand, Apply, Analyze, Evaluate, and Create (as shown in Figure 1) [Anderson and Krathwohl, 2001]. 
According to this theory, the first three foundational levels are associated with learning ability , signifying that these skills are relatively easier to acquire, whereas the upper three levels involve planning ability , which are more complex to nurture and develop. We focus on the learning abilities in this work and leave the exploration of planning abilities as future work.  \\n\\nTo align with BCM and bridge the gap in design prompting strategies for LLMs, we introduce a novel method named Pedagogical Chain-of-Thought (abbr. PedCoT). More specifically, the PedCoT strategy embodies two parts. First, following the BCM and the levels of learning ability , we propose the pedagogical principles of prompt (PPP) design for LLMs. The detailed content of PPP is consistent with learning ability , as shown in Figure 1. Second, concerning the content of prompts to be grounded when interacting with LLMs, we formulate the two-stage interaction process (TIP) and grounded prompts, consistent with the three principles.  \\n\\nTo assess the efficacy of our approach, we gather two publicly available datasets, each containing mathematical problems of varying degrees of complexity. i.e., multi-step arithmetic and multi-step word problems. The experimental results, compared against strong baselines, consistently reveal a noteworthy contrast to what most existing studies have asserted. Specifically, we observe that current LLMs, such as GPT-4 Turbo, can effectively find mathematical reasoning mistakes through our proposed PedCoT. The result highlights the importance and value of domain knowledge in prompting the reasoning abilities of LLMs.  \\n\\nThe main contributions of this paper include:  \\n\\n• We conduct an interdisciplinary study and investigate a new problem on how to leverage domain knowledge to guide prompt design for LLMs, i.e., leveraging pedagogical theories to find mathematical mistakes. 
• We develop a novel zero-shot prompting strategy named PedCoT to bridge the gap between educational theory and prompts for LLMs, which is featured by (1) pedagogical principles for prompt (PPP) design, (2) twostage interaction process (TIP) and (3) PedCoT prompts. • Experiments on two public datasets with various complexity of math problems demonstrate that contrary to what most existing studies claim, current LLMs actually can find mathematical reasoning mistakes by using our PedCoT equipped with pedagogical theory.', 'reference': '[7] LLMs Can Find Mathematical Reasoning Mistakes by Pedagogical Chain-of-Thought, IJCAI, 2024, chunk 1'}, 8: {'id': 8, 'title': 'Instruction Induction: From Few Examples to Natural Language Task   Descriptions', 'content': '# 7 Related Work\\nIn-Context Learning Brown et al. [2020] suggest that models can learn a task by conditioning on few input-output demonstration pairs, without any fine-tuning or gradient updates. This paradigm, known as in-context learning or prompt-based learning [Liu et al., 2021], has been the focus of many research efforts lately: Du et al. [2021] suggest methods for more efficient in-context learning, Zhao et al. [2021] study methods for improving the stability and accuracy of prompt-based models, Chen et al. [2021] and Min et al. [2022a] conduct meta-training with an in-context learning objective, while other work studies the effect of the provided prompts [Reynolds and McDonell, 2021, Webson and Pavlick, 2021, Min et al., 2022b], or suggests prompt reframing techniques [Mishra et al., 2021] and prompt retrieval methods [Rubin et al., 2021]. To the best of our knowledge, all previous work study in-context learning through the lens of executing a latent task, while we focus on the ability to explicitly describe it.  \\n\\nThe Instruction Paradigm Efrat and Levy [2020] propose to learn new tasks from natural language instructions. Mishra et al. [2022] and Wang et al. 
[2022b] collect crowdsourcing instructions used to create NLP datasets into a benchmark for measuring the ability to solve tasks by reading instructions. Recent work shows that fine-tuning on task instructions ( instruction tuning ) improves the zero-shot learning abilities of LMs [Sanh et al., 2022, Wei et al., 2022a, Ouyang et al., 2022]. This work focuses on models’ ability to generate instructions, rather than their ability to execute instructions written by humans.  \\n\\nIntermediate Reasoning Steps Nye et al. [2022] show that LMs can perform complex computations by writing intermediate steps on a “scratchpad”. In chain of thought prompting [Wei et al., 2022b], input-output demonstrations are enriched with sentences elaborating intermediate task reasoning steps, improving the performance of LMs on tasks requiring reasoning skills. Subsequent work further improves the performance on such tasks using a self-consistency ensemble [Wang et al., 2022a], which samples a set of diverse chain-of-thought reasoning paths, taking the majority vote over all generated answers. Zelikman et al. [2022] utilize a small set of examples labeled with chain-of-thought rationales and a large set of unlabeled data to iteratively bootstrap automatic rationale generation, thus creating a large dataset labeled with such rationales to enable fine-tuning. In contrast, we study the ability of LMs to generate a description of the task, rather than generating intermediate reasoning steps as a means of executing complex tasks.\\n\\n# 8 Discussion\\nThis work demonstrates that large LMs can not only infer new tasks based on a handful of demonstrations, but also describe them in natural language. We provide evidence of this ability on a diverse set of language tasks, and show that while instruction induction abilities are limited to a single state-of-the-art model, this model does indeed approach human performance on about half the tasks.  
\\n\\nIt is not unreasonable to assume that models in the near future will be even better at processing human-generated instructions, and it is therefore interesting to discuss the potential applications of instruction induction. In particular, we envision a use case in which instruction induction serves as a machine learning approach; instead of converting a dataset into a set of continuous parameters, we could produce a natural language instruction that best describes the data. Grounding the model in concise natural language has the advantage of interpretability, and has the potential to solve fundamental issues pertaining to spurious correlations. While it is still too early to determine whether this approach is viable, we view it as an intriguing direction for future research.\\n\\n\\n\\n# A Dataset Details\\nThis appendix details each task’s dataset (§A.1). Some datasets rely on a set of common English nouns (CEN), described at $\\\\S\\\\mathrm{A}.2$ .', 'reference': '[8] Instruction Induction: From Few Examples to Natural Language Task   Descriptions, ACL, 2023, chunk 4'}, 9: {'id': 9, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# COMPLEXITY -B ASED PROMPTING FOR MULTI -STEP REASONING\\nYao $\\\\mathbf{F}\\\\mathbf{u}^{\\\\star}$ $\\\\mathbf{\\\\ddot{\\\\rho}}\\\\mathbf{Hao}\\\\ \\\\mathbf{Peng}^{\\\\pmb{\\\\alpha}}$ , Ashish Sabharwal ♣, Peter Clark ♣, Tushar Khot ♣♠University of Edinburgh ♣Allen Institute for AI yao.fu $@$ ed.ac.uk, haop $@$ allenai.org, ashishs $@$ allenai.org, peterc $@$ allenai.org, tushark $@$ allenai.org\\n\\n# A BSTRACT\\nWe study the task of prompting large-scale language models to perform multistep reasoning. Existing work shows that when prompted with a chain of thoughts (CoT), sequences of short sentences describing intermediate reasoning steps towards a final answer, large language models can generate new reasoning chains and predict answers for new inputs. 
A central question is which reasoning examples make the most effective prompts. In this work, we propose complexitybased prompting, a simple and effective example selection scheme for multi-step reasoning. We show that prompts with higher reasoning complexity , i.e., chains with more reasoning steps, achieve substantially better performance on multistep reasoning tasks over strong baselines. We further extend our complexitybased criteria from prompting (selecting inputs) to decoding (selecting outputs), where we sample multiple reasoning chains from the model, then choose the majority of generated answers from complex reasoning chains (over simple chains). When used to prompt GPT-3 and Codex, our approach substantially improves multi-step reasoning accuracy and achieves new state-of-the-art (SOTA) performance on three math benchmarks (GSM8K, MultiArith, and MathQA) and two BigBenchHard tasks (Date Understanding and Penguins), with an average $+5.3$ and up to $+18$ accuracy improvements. Compared with existing example selection schemes like manual tuning or retrieval-based selection, selection based on reasoning complexity is intuitive, easy to implement, and annotation-efficient. Further results demonstrate the robustness of performance gains from complex prompts under format perturbation and distribution shift.', 'reference': '[9] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. 
However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et 
al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. 
Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. 
,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   \\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 2: {'id': 2, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). 
Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). 
Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. 
(2021 ) to construct an interactive natural language interface. For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 3: {'id': 3, 'title': 'ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness', 'content': '# Acknowledgements\\nWe thank the reviewers and the area chairs for their helpful comments. We also thank Peter Hase, Prateek Yadav, and Shiyue Zhang for their feedback. This work was supported by NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, and a Google Ph.D. Fellowship. The views contained in this article are those of the authors and not of the funding agency.\\n\\n# Limitations\\nAn interesting assumption for future work to address is that all knowledge typically needed to evaluate the correctness of a reasoning step is explicitly present as part of the input or the intermediate reasoning steps. In scenarios where correctness depends on implicit knowledge, we rely on the choice of underlying models (described in Appendix A )which are built on top of pre-trained LMs and are known to capture a lot of background knowledge ( Petroni et al. ,2019 ;Roberts et al. ,2020 ). However, inferences that rely on substantial implicit knowledge may not be best evaluated through current metrics. While current evaluation frameworks focus on evaluating the quality of modelgenerated reasoning chains, Wei et al. (2022 ) note that the chain itself may not faithfully reflect the internal reasoning process of the model. 
This remains an open question for future work to address.', 'reference': '[3] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7'}, 4: {'id': 4, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. (2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. 
This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. (2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. 
Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. (2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. 
(2023c ), effectively consolidates answers from multiple independent reasoning chains. This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. (2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[4] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 5: {'id': 5, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. 
TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[5] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 1 Introduction\\nText-to-SQL parsing, the task of translating a natural language question into a SQL query, has found wide applications in building natural language interfaces to databases and thus piqued significant research interest in recent years ( Wang et al. ,2020 ;Deng et al. ,2021 ;Yu et al. ,2021 ;Rajkumar et al. ,2022 ;Hongjin et al. ,2023 ;Ni et al. ,2023 ). To develop a text-to-SQL parser, a prevalent approach is to collect labeled data and train a model via supervised learning ( Shaw et al. ,2021 ;Scholak et al. ,2021 ). While effective, this approach necessitates a considerable amount of training data, which is costly to obtain because annotating SQL queries requires programming expertise. Consequently, the lack of data hinders real-life applications of stateof-the-art parsers, especially on novel databases and unseen domains ( Suhr et al. ,2020 ).  \\n\\nAs an alternative to supervised learning, incontext learning ( Brown et al. ,2020 ), an emergent capability of large language models (LLMs), alleviates the need for large-scale data. With only a few examples, in-context learning enables LLMs to demonstrate performance comparable to or even better than fully supervised models on many NLP tasks, such as question answering, machine translation, and natural language inference ( Chowdhery et al. ,2022 ;Kojima et al. ,2022 ;Wei et al. ,2022b ,a ;Brohan et al. ,2023 ). When applied to text-to-SQL parsing, in-context learning also shows encouraging results, but it still lags behind supervised approaches ( Rajkumar et al. ,2022 ;Chang et al. ,2023 ;Liu et al. ,2023a ).  
\\n\\nWe hypothesize that the under-performance is because text-to-SQL parsing requires complex, multistep reasoning. Even for a seemingly simple question, such as “What is the ID of Kyle,\" a model has to ground it to database schemas, infer the relational algebra among schema items, and construct syntactically correct SQL clauses. Recently, the chain-of-thought (CoT) style promptings ( Wei et al. ,2022b ;Zhou et al. ,2023 ) are proposed and have shown promising multi-step reasoning capabilities. To enhance LLMs’ reasoning ability, we systematically explore CoT style prompting for text-to-SQL parsing. Specifically, we seek to answer two research questions: (1) Which prompting style is better, generating all reasoning steps in a single pass, or iterative prompting and problem solving? (2) Does including more detailed information in the reasoning steps lead to better results for text-to-SQL parsing?  \\n\\nTo address the questions, we adopt two widely used prompting methods for text-to-SQL parsing As the first method, we apply chain-of-thought prompting (Wei et al. ,2022b ) by drawing an analogy between its problem-solving process and the execution procedure of a SQL query. Referring to the logical execution order of SQL clauses (Narechania et al. ,2021 ), we compose the intermediate execution steps in natural language and prompt LLMs to derive them before generating the SQL query. As the second method, we follow Zhou et al. (2023 ) to apply least-to-most prompting in two stages: (1) reduction: generate a series of sub-questions from the original question and (2) solving: iteratively translate each sub-question into its corresponding SQL query, with the original question as the last sub-question. However, in our case study 1 , we find that directly applying chainof-thought and lease-to-most promptings leads to error propagation issues. Their rationales contain very demonstration-specific information and are easier to mislead the reasoning process. 
Furthermore, least-to-most prompting technique leads to additional computational and time cost due to the multiple stages of reduction and solving.  \\n\\n  \\nFigure 1: Different prompting methods with multi-step reasoning for text-to-SQL parsing: (a) Chain-of-Thought, (b) Least-toMost, and our proposed (c) QDecomp , and (d) QDecomp $^+$ InterCOL .  \\n\\nTherefore, we propose a new method called question-decomposition prompting (QDecomp ). Similar to chain-of-thought prompting, QDecomp generates a sequence of reasoning steps and then the SQL query in one pass. However, we modify the steps to instruct LLMs to decompose the original complex question, akin to the problem reduction stage in least-to-most prompting. Also, to help LLMs ground database schemas, we design a variant of question decomposition prompting (QDecomp $^+$ InterCOL ) by including the table and column names involved in each sub-question. We conduct comprehensive evaluations on two textto-SQL datasets, Spider ( Yu et al. ,2018 ) and Spider Realistic ( Deng et al. ,2021 ). Our proposed prompting methods substantially outperform existing prompting ones by 2.4 and 1.5 point absolute gains on the development set of Spider and Spider Realistic, respectively. The results suggest that the iterative prompting which is costly due to additional computational resources requirement as in least-to-most prompting may not be necessary $(R Q I)$ . In addition, our analysis shows the proposed question decomposition prompting methods, which do not instruct LLMs to generate detailed reasoning steps, reduce the chance of error propagation when generating the reasoning steps. ( RQ2 ). 
Finally, we evaluate the robustness of our proposed prompting methods by varying the number, selection, and format of in-context examples and show that they can achieve consistently strong performance across different settings.', 'reference': '[0] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 1'}, 1: {'id': 1, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). 
In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. 
With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. 
For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 2: {'id': 2, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# Exploring Chain-of-Thought Style Prompting for Text-to-SQL\\nChang-You Tai, Ziru Chen, Tianshu Zhang, Xiang Deng, Huan Sun The Ohio State University {tai.97, chen.8336, zhang.11535, deng.595, sun.397}@osu.edu\\n\\n# Abstract\\nConventional supervised approaches for textto-SQL parsing often require large amounts of annotated data, which is costly to obtain in practice. Recently, in-context learning with large language models (LLMs) has caught increasing attention due to its superior few-shot performance in a wide range of tasks. However, most attempts to use in-context learning for text-to-SQL parsing still lag behind supervised methods. We hypothesize that the underperformance is because text-to-SQL parsing requires complex, multi-step reasoning. In this paper, we systematically study how to enhance the reasoning ability of LLMs for text-to-SQL parsing through chain-of-thought (CoT) style promptings including CoT prompting ( Wei et al. ,2022b ) and Least-to-Most prompting (Zhou et al. ,2023 ). Our experiments demonstrate that iterative prompting as in Least-toMost prompting may be unnecessary for textto-SQL parsing and directly applying existing CoT style prompting methods leads to error propagation issues. 
By improving multi-step reasoning while avoiding much detailed information in the reasoning steps which may lead to error propagation, our new method outperforms existing ones by 2.4 point absolute gains on the Spider development set.', 'reference': '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 0'}, 3: {'id': 3, 'title': 'Interpretable Math Word Problem Solution Generation Via Step-by-step   Planning', 'content': '# D.2 Prompt mining through paraphrasing\\nFor the instruction prompt, finding good prompts is an art that takes time and experience ( Liu et al. ,2021b ). Thus, we apply prompt mining through paraphrasing by first starting with a seed prompt (e.g. “The next step operation is: ”) and paraphrase it into a set of other candidate prompts with similar meaning (Yuan et al. ,2021 ). Then, we tune the model with these candidates by treating them as hyper-parameters and select the one that performs best on the target task. We find that anchor tokens (e.g. “?”) are helpful and leads to good performance, which is consistent with prior work ( Liu et al. ,2021c ).\\n\\n# EArchitecture for text-to-text language modeling\\nSee figure 3', 'reference': '[3] Interpretable Math Word Problem Solution Generation Via Step-by-step   Planning, ACL, 2023, chunk 7'}, 4: {'id': 4, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. 
(2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. 
We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. 
While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. 
We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[4] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:07\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Guiding Large Language Models Via Directional Stimulus Prompting.', 'content': \"# 3.3 Chain-of-Thought reasoning\\nWhile current methods primarily use general task-specific prompts, LLMs show sensitivity to them. 
Studies [ 69 ,26 ,79 ] demonstrate that LLMs can vary in performance based on the prompt used. As a result, much of the previous work has centered on manually [ 56 ] or automatically [ 61 ,79 ]crafting better prompts. However, these efforts mainly focus on task-specific prompts, which may not be optimal for every instance of a task. In our experiment, we employ our approach to generate instance-specific trigger prompts to elicit Chain-of-Thought (CoT) reasoning. Specifically, we train a policy model ( t5-base ) to generate instance-specific CoT trigger prompts, such as “ Let’s think step by step ”, to optimally prompt varying samples.  \\n\\nDataset and evaluation We adopted the experimental setup from previous work [ 26 ,79 ], where we tested zero-shot CoT reasoning abilities of InstructGPT ( text-davinci-002 ) with different trigger prompts. There are 600 examples in the MultiArith dataset [ 57 ], which we divided into 300/50/250 for training/validation/test set. As for the AQuA dataset [ 35 ], we use the standard test set with 254 samples, 300 samples from the standard training set for our training, and 100 samples for the standard validation set for our validation. We report the reasoning accuracy.  \\n\\nTable 2: Zero-shot chain of thoughts performance of InstructGPT ( text-davinci-002 ) with different prompts. ${}^{*}\\\\mathrm{Our}$ approach trains a policy model to generate instance-specific prompt triggers, which are compared to the task-specific prompts in [26, 79].   
\\n\\n\\n<html><body><table><tr><td>No.</td><td>Category</td><td>Chain-of-Thought Trigger Prompt</td><td>MultiArith</td><td>AQuA</td></tr><tr><td>1</td><td>Human-Designed</td><td>Let's thinkstepby step.</td><td>79.6</td><td>31.9</td></tr><tr><td>2</td><td></td><td>Weshouldthinkaboutthisstepbystep.</td><td>81.2</td><td>28.7</td></tr><tr><td>3</td><td></td><td>First,</td><td>78.0</td><td>38.2</td></tr><tr><td>4</td><td></td><td>Beforewediveintotheanswer,</td><td>54.8</td><td>27.2</td></tr><tr><td>5</td><td></td><td>Prooffollowedbytheanswer</td><td>58.4</td><td>37.8</td></tr><tr><td>6</td><td></td><td>Let'sthinkstepbystepinarealisticway.</td><td>59.6</td><td>33.9</td></tr><tr><td>7</td><td></td><td>Let's thinkstepby step usingcommon sense and knowledge.</td><td>80.0</td><td>34.3</td></tr><tr><td>8</td><td></td><td>Let'sthinklikeadetectivestepbystep.</td><td>73.6</td><td>24.0</td></tr><tr><td>9</td><td></td><td>Let'sthinkaboutthislogically.</td><td>75.2</td><td>34.7</td></tr><tr><td>10</td><td></td><td>Let'sthink stepby step.First,</td><td>78.8</td><td>32.3</td></tr><tr><td>11</td><td></td><td>Let'sthink</td><td>56.8</td><td>38.2</td></tr><tr><td>12</td><td></td><td>Let'ssolvethisproblembysplittingit intosteps.</td><td>72.4</td><td>33.2</td></tr><tr><td>13</td><td></td><td>Theansweris aftertheproof.</td><td>42.8</td><td>34.3</td></tr><tr><td>14</td><td></td><td>Let'sberealisticandthinkstepbystep.</td><td>69.6</td><td>29.9</td></tr><tr><td>15</td><td>APE [79]</td><td>Let's work this out in a stepby stepway to be surewehavetherightanswer</td><td>81.6</td><td>34.3</td></tr><tr><td>16</td><td>DSP w/ SFT</td><td>(*Generated instance-specific prompt)</td><td>75.2</td><td>35.8</td></tr><tr><td>17</td><td>DSPw/SFT+RL</td><td>(*Generated instance-specific prompt)</td><td>84.0</td><td>38.6</td></tr></table></body></html>  \\n\\nSupervised fine-tuning details For supervised fine-tuning (SFT), we first run inference on the training set with the 14 human-crafted prompts tested in [ 
26 ], respectively. We then selected those prompt and query pairs which resulted in a correct CoT reasoning outcome to form the training set for SFT. These query-prompt pairs were used to train a t5-base policy model for 2 epochs, with the model input being the query instance and the target output a trigger prompt.  \\n\\nRL training details After SFT, the prompts generated by the policy model were used to trigger InstructGPT for zero-shot CoT prompting. Reasoning accuracy was utilized as the reward for reinforcement learning (RL). A reward of 1 was assigned for correct reasoning results and 0 otherwise. We conducted 20 training iterations (106k episodes), with 5 epochs per batch, a batch size of 8, and a learning rate of 2e-6. The parameters for $\\\\mathrm{KL}_{\\\\mathrm{target}}$ and $\\\\beta_{0}$ were set to 0.5 and 0.001, respectively.  \\n\\nResults We compare the performance of using our generated instance-specific prompts with using the 14 human-crafted prompts which we used as the pseudo-stimulus to constitute the training set for SFT and also the prompt automatically discovered by the APE approach [ 79 ]. Note that all these 15 prompts are general task-specific and are used for the whole test set while ours are instance-specific. The performance comparison is shown in the Table 8. As can be seen, InstructGPT’s performance varies significantly when using different task-specific prompts. Compared to the 14 task-specific human-designed prompts, DSP enhances the performance with instance-specific prompts. It also outperforms the prompt discovered by the APE approach. Solely relying on supervised fine-tuning of the policy model with the dataset comprising the 14 human-designed prompts doesn’t lead to its peak performance. 
After fine-tuning with RL, the policy model is encouraged to explore better instance-specific trigger prompts, further improving performance.\\n\\n# 4 Related work\\nBlack-box large language models Recent years have witnessed the emergence of LLMs such as GPT-3 [ 6 ], Codex [ 9 ], InstructGPT, ChatGPT [ 46 ], PaLM [ 10 ], and LaMDA [ 66 ], which show significant promise in the field of NLP. These LLMs typically have a large number of parameters and require vast amounts of training data. Due to their scaling, these models have exhibited many emergent abilities, such as in-context learning, few-shot prompting, chain-of-thought prompting, and instruction following [ 6 ,46 ,69 ]. However, most LLMs are not open-sourced and can only be accessed via black-box APIs, through which the users send prompt queries and receive responses.  \\n\\nWhile there exist open-source LLMs such as OPT-175B [ 73 ] and Bloom [ 58 ], their local execution and fine-tuning require significant computational resources that may be infeasible for most researchers and users. However, despite their considerable performance on various tasks, LLMs often fall short of generating outputs that fully align with desired outputs on specific downstream tasks and use cases [ 16 ,42 ,18 ]. Our approach seeks to address this limitation by introducing directional stimulus generated by a small tunable LM into the prompt to provide more fine-grained guidance and control over black-box LLMs.  \\n\\nPrompt optimization and engineering Efficiently optimizing pre-trained LMs on downstream tasks by finding optimal prompts has been a focus of prior research. One approach involves tuning soft prompts, which are continuous embedding vectors that can be optimized using gradient descent methods [ 32 ,30 ,67 ,2 ,64 ]. However, the requirements of gradients and the challenge of passing gradients and continuous prompts through black-box APIs, making them less practical for the blackbox LLMs. 
Researchers have also tried to seek optimal prompts by designing task-specific natural language instructions and selecting proper training samples as in-context demonstrations in the prompt. These methods include manual engineering [ 50 ,6 ,56 ], editing [ 61 ,76 ], reinforcement learning [ 13 ,39 ], and automatic generation [ 79 ]. Despite these efforts, such prompts are not always effective at steering LLMs to generate desired outputs, especially for fine-grained instance-specific behaviors that are difficult to describe using task-specific instructions and demonstration examples. To address this limitation, our approach is able to provide more fine-grained instance-specific guidance generated by a small tunable policy model optimized with supervised fine-tuning and/or reinforcement learning.  \\n\\nControllable text generation The control of language models (LMs) has been extensively studied. Early approaches fine-tuned LMs on datasets containing desired attributes [ 17 ]. [ 24 ] proposed class-conditioned LMs, generating text with predefined control codes. However, direct LM training is costly. To address this, PPLM [ 12 ] trains an attribute model and passes gradients to control generation. GeDi [ 27 ] and DExperts [ 36 ] use class-conditional distributions as generative discriminators to guide generation, reducing computation complexity. These methods require either additional LM training or internal gradients and logistics, making them not applicable to black-box LLMs. Our approach proposes a solution to control black-box LLMs by inserting directional stimulus into the input query prompt and optimizing based on the return output.  \\n\\nReinforcement learning for NLP Reinforcement learning has been successfully applied to various NLP tasks, such as syntactic parsing [ 44 ,29 ], machine translation [ 71 ,28 ], summarization [ 48 ,62 ], conversational systems [ 31 ], etc. 
Language models define probability distributions over tokens in their vocabulary, and the text generation problem can be naturally formulated as selecting an action in an RL setting. Therefore, there have been extensive research efforts on optimizing LMs with RL, usually by aligning them with human preferences [ 80 ,70 ,40 ,62 ]. For example, the LLM InstructGPT [ 46 ] is optimized with RL to better follow users’ instructions and intent. In contrast with these works that directly update the LLMs to align with human preferences, our work optimizes a small policy model that generates text (stimulus) to guide LLMs to generate more human-preferred output instead of directly optimizing the LLMs, bypassing the inefficient LLM’s optimization.\", 'reference': '[1] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6'}, 2: {'id': 2, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. 
For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. 
We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. (2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. 
For each compared method, we consider both iterative and non-iterative settings. Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[2] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 3: {'id': 3, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. 
Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  
\\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . 
We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[3] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 4: {'id': 4, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. 
We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[4] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}, 5: {'id': 5, 'title': 'Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models', 'content': '# Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\\nBilgehan Sel 1 , Ahmad Al-Tawaha 1 , Vanshaj Khattar 1 , Lu Wang 2 , Ruoxi Jia 1 and Ming Jin 1 1 Virginia Tech 2 Microsoft\\n\\n# Abstract\\nCurrent literature, aiming to surpass the “Chain-of-Thought” approach, often resorts to an external modus operandi involving halting, modifying, and then resuming the generation process to boost Large Language Models’ (LLMs) reasoning capacities. This mode escalates the number of query requests, leading to increased costs, memory, and computational overheads. Addressing this, we propose the Algorithm of Thoughts —a novel strategy that propels LLMs through algorithmic reasoning pathways, pioneering a new mode of in-context learning. By employing algorithmic examples, we exploit the innate recurrence dynamics of LLMs, expanding their idea exploration with merely one or a few queries. Our technique outperforms earlier single-query methods and stands on par with a recent multi-query strategy that employs an extensive tree search algorithm. Intriguingly, our results suggest that instructing an LLM using an algorithm can lead to performance surpassing that of the algorithm itself, hinting at LLM’s inherent ability to weave its intuition into optimized searches. 
We probe into the underpinnings of our method’s efficacy and its nuances in application.', 'reference': '[5] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'}, 6: {'id': 6, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[6] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}, 7: {'id': 7, 'title': 'Do Language Models Exhibit the Same Cognitive Biases in Problem Solving As Human Learners?', 'content': '# C. Additional Results\\n\\n# C.1. Child-Persona Prompting\\nWe investigate whether varying the prompting strategy could influence the model’s biases. Specifically, we employ a modified version of the zero-shot chain-of-thought prompt, tailored to simulate a child’s reasoning process. We prompt the model with the phrase “Let’s think step by step as a gradeschool child would,” replacing the standard CoT instruction. Following this, we apply the same decoding method used in traditional CoT. The results for this approach are reported in Table 4 . 
While we notice larger consistency and transfer vs comparison effects for some models, we observe no substantial departure from the results achieved with conventional CoT prompting.\\n\\n# C.2. Bias Strength by Number of Reasoning Steps\\nFigs. 4 and 5 illustrate how the strength of the measured biases (consistency and carry, respectively) change in relation to the number of reasoning steps in a problem (in the CoT-prompted case). Unlike for the transfer vs comparison bias (§ 5.3 ), we do not observe a consistent difference in the trend between pre-trained-only and instruction-tuned models.  \\n\\n<html><body><table><tr><td rowspan=\"3\">Mode</td><td rowspan=\"3\">Model</td><td colspan=\"4\">Consistencybias ($5.2)</td><td colspan=\"4\">Transfer vs comparison bias ($5.3)</td><td colspan=\"4\">Carry effect ($5.4)</td></tr><tr><td colspan=\"2\">Accuracy (%)</td><td colspan=\"2\"></td><td rowspan=\"2\"></td><td colspan=\"2\">Accuracy (%)</td><td rowspan=\"2\">p-val</td><td colspan=\"2\">Accuracy (%)</td><td rowspan=\"2\"></td><td rowspan=\"2\">p-val</td></tr><tr><td>Co</td><td></td><td>InCo CATE</td><td>p-val</td><td>C</td><td>CATE</td><td>NCa</td><td>Ca</td></tr><tr><td rowspan=\"8\">Child CoT</td><td>LLaMA27B</td><td>14.6</td><td>5.0</td><td>9.6</td><td><0.001</td><td>T 19.8</td><td>11.6</td><td>8.2</td><td><0.001</td><td>40.0</td><td>44.4</td><td>CATE 
-4.4</td><td>0.048</td></tr><tr><td>LLaMA213B</td><td>20.0</td><td>5.0</td><td>15.0</td><td><0.001</td><td>35.0</td><td>7.0</td><td>28.0</td><td><0.001</td><td>20.8</td><td>21.0</td><td>-0.2</td><td>0.903</td></tr><tr><td>Mistral7B</td><td>39.2</td><td>17.2</td><td>22.0</td><td><0.001</td><td>48.8</td><td>26.6</td><td>22.2</td><td><0.001</td><td>58.4</td><td>57.2</td><td>1.2</td><td>0.415</td></tr><tr><td>Mixtral8x7B</td><td>66.2</td><td>34.6</td><td>31.6</td><td><0.001</td><td>69.8</td><td>49.4</td><td>20.4</td><td><0.001</td><td>70.4</td><td>69.4</td><td>1.0</td><td>0.701</td></tr><tr><td></td><td>55.2</td><td>24.2</td><td></td><td></td><td></td><td></td><td>28.8</td><td><0.001</td><td>67.8</td><td></td><td>4.8</td><td></td></tr><tr><td>LLaMA27BChat LLaMA213BChat</td><td>65.2</td><td>27.0</td><td>31.0 38.2</td><td><0.001 <0.001</td><td>62.6 79.8</td><td>33.8 48.2</td><td>31.6</td><td><0.001</td><td>80.0</td><td>63.0 77.0</td><td>3.0</td><td>0.069 0.108</td></tr><tr><td>Mistral 7BInstr.</td><td>65.0</td><td>30.6</td><td>34.4</td><td><0.001</td><td>75.2</td><td>52.8</td><td>22.4</td><td><0.001</td><td>77.2</td><td>74.4</td><td>2.8</td><td>0.178</td></tr><tr><td>Mixtral8x7BInstr.</td><td>88.6</td><td>72.4</td><td>16.2</td><td><0.001</td><td>98.8</td><td>82.4</td><td>16.4</td><td><0.001</td><td>97.6</td><td>97.4</td><td>0.2</td><td>0.809</td></tr></table></body></html>  \\n\\nTable 4. Accuracy, conditional average treatment effect (CATE), and statistical significance (p-value) on math word problems generated for the three tests detailed in $\\\\S5.2$ ,$\\\\S5.3$ and $\\\\S5.4$ . ‘Co’ denotes consistent, ‘InCo’ inconsistent, $\\\\mathbf{\\\\nabla}^{\\\\leftarrow}\\\\mathbf{T}$ transfer, $\\\\mathbf{\\\\ddot{C}}^{,}$ comparison, ‘Ca’ carry, and $\\\\mathbf{^{\\\\prime}N C a}^{\\\\prime}$ no carry conditions. The results presented are for the child-persona prompting strategy described in Footnote 12 and App. 
C.1 .‘Chat’ and ‘Inst.’ indicate the instruction-tuned versions of the models. CATE values are bolded when $p<0.01$ .', 'reference': '[7] Do Language Models Exhibit the Same Cognitive Biases in Problem Solving As Human Learners?, ICML, 2024, chunk 7'}, 8: {'id': 8, 'title': 'Semantic Residual Prompts for Continual Learning', 'content': '# 5 Conclusion\\nThis paper presents STAR-Prompt, a prompting method for Continual Learning based on three main novelties. First, we strengthen the stability of the prompt selection mechanism using a foundation model and two levels of prompt tuning. Second, we replace standard prompt token concatenation with additive residuals, which transfer semantics into its MLP layers. Finally, we use a simple generative replay based on a multi-modal representation of the feature distributions. Each part of STAR-Prompt brings a significant contribution, leading it to outperform the CL state of the art.', 'reference': '[8] Semantic Residual Prompts for Continual Learning, ECCV, 2024, chunk 7'}, 9: {'id': 9, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 5 Related Work\\nMemorization and Reasoning in PLMs. With the recent success of large-scale pre-trained language models (PLMs), there has been growing interest in investigating what is captured by these PLMs during pre-training ( Talmor et al. ,2020a ;Rogers et al. ,2020 ;Kassner et al. ,2020 ). Studies have shown that in addition to learning linguistic knowledge about language use, PLMs are capable of memorizing a great amount of world knowledge ( Rogers et al. ,2020 ), yielding competitive performance on knowledge probing ( Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ) and other knowledge-intensive tasks such as question answering ( Roberts et al. ,2020 ) and fact checking (Lee et al. ,2020 ), without resorting to any external knowledge source. On the other hand, other work such as ( Talmor et al. ,2020a ;Kassner et al. 
,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in recalling their stored knowledge for multi-step inferences, such as answering complex, multi-hop  \\n\\nFor existing work on learning-based prompting, (Shin et al. ,2020 ) proposes to use gradient-guided search to find appropriate discrete prompt tokens in the PLM’s vocabulary to form prompt templates. While the resulting prompts are readable, most of them have very low fluency and interpretability. (Zhong et al. ,2021 ;Qin and Eisner ,2021 ) propose to optimize the prompts in continuous space instead, which shows large benefits in both effectiveness and optimization efficiency. ( Zhong et al. ,2021 ) also raises and studies the question about whether learning-based prompting could exploit spurious dataset regularities which would weaken the validity of standard evaluation results, a concern we seriously address in our work. ( Lester et al. ,2021 ;Li and Liang ,2021 ) follow the continuous prompting paradigm, and tune task-level prompts for lightweight adaptation of PLMs. Overall, existing prompt learning methods are either restricted to cases where there exists a single & identifiable relation/predicate within the query ( Zhong et al. ,2021 ;Qin and Eisner ,2021 ;Shin et al. ,2020 ), or being static and not sensitive to sample-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ).  \\n\\nIterative Knowledge Retrieval. We are also inspired by methods that iteratively retrieve knowledge from explicit knowledge sources for multistep reasoning, such as ( Xiong et al. ,2021 ;Qi et al. ,2019 ). Our problem setting could be viewed as iterative retrieval over implicit knowledge in PLMs, instead of from explicit knowledge sources.\\n\\n# 6 Conclusion & Future Work\\nWe explore an iterative prompting framework towards driving a “train of thought” from PLMs for multi-step reasoning tasks. 
We show the superiority of this iterative scheme, and also effectiveness of our proposed context-aware prompter design, which addresses key limitations of previous prompting methods when applied in this new scheme. In addition, we conduct both quantitative & qualitative analysis on the faithfulness of the learned prompting behaviors. In the future, we aim to further extend and apply our ideas to Language Model pretraining, with the hope that PLMs can be inherently equipped with stronger multi-step reasoning capabilities.\\n\\n# Acknowledgement\\nThe authors would like to thank the OSU NLP group members for their thoughtful comments. This research was sponsored in part by Google Faculty Award, NSF IIS-1815674, NSF CAREER #1942980, NSF OAC-2112606, and Ohio Supercomputer Center ( Center ,1987 ).\\n\\n\\n\\n# A Appendix\\n\\n# A.1 Hyperparameters\\nWe set the batch size to be 32, 128, 32 and train for 70, 50, 40 epochs for 2Wiki, LoT & R4C respectively. Table 5 summarizes other hyperparameters used in our experiments.  \\n\\nTable 5: Hyperparameter settings for all compared methods. lr: learning rate, pt_len: prompt length.   \\n\\n\\n<html><body><table><tr><td colspan=\"2\">2Wiki</td><td colspan=\"2\">LoT</td><td colspan=\"2\">R4C</td></tr><tr><td></td><td>lr pt_len</td><td>lr</td><td>pt_len</td><td>lr</td><td>pt_len</td></tr><tr><td>Prompt-T</td><td>8e-3</td><td>80 4e-3</td><td>80</td><td>4e-3</td><td>60</td></tr><tr><td>Prefix-T</td><td>8e-4</td><td>80 4e-4</td><td>60</td><td>4e-4</td><td>80</td></tr><tr><td>PLM-FT</td><td>4e-5</td><td>4e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>PLM-QA</td><td>4e-5</td><td>8e-5</td><td></td><td>4e-5</td><td></td></tr><tr><td>Ours</td><td>8e-5</td><td>30</td><td>8e-5 60</td><td>8e-5</td><td>30</td></tr></table></body></html>\\n\\n# A.2 More Examples on Prompter Attention Visualizations\\nFigure 5: Prompter attention visualization. Reasoning type: Comparison.  
\\n\\n  \\nFigure 4 ,5 ,6 ,7 show additional example prompter attention visualizations in the 2Wiki dataset, each corresponding to a different reasoning type (composition, comparison, inference & bridge-comparison respectively).   \\nFigure 4: Prompter attention visualization. Reasoning type: Composition.  \\n\\n  \\nFigure 6: Prompter attention visualization. Reasoning type: Inference.  \\n\\n<html><body><table><tr><td>prompt</td><td>prompt</td><td></td><td></td><td></td><td>prompt</td><td>prompt</td><td></td><td>prompt</td><td>prompt</td><td>Which</td><td></td><td>film</td><td></td><td>prompt Which whose director Is</td></tr><tr><td>Which film whose</td><td></td><td>Which film whose ?</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film whose</td><td></td><td>Which film who</td><td></td><td>film whose</td><td></td><td></td></tr><tr><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is younger</td><td></td><td>director is</td><td></td><td>director is younger</td><td></td><td>drector Is</td><td></td><td>younger</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>younger</td><td></td><td></td><td></td><td>younger</td><td></td><td></td></tr><tr><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh uoo</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh oon</td><td></td><td>Kh oon</td></tr><tr><td>uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>Ka</td><td></td><td>oon Ka</td><td></td><td>oon Ka</td><td></td><td>Ka</td><td></td><td>Ka</td></tr><tr><td>Ka</td><td></td><td>Ka Kh</td><td></td><td>Ka Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>Kh</td></tr><tr><td>Kh uoo</td><td></td><td>Uoo</td><td></td><td>oon</td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>oon</td><td></td><td>oon</td><td></td><td>oon 
or</td></tr><tr><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td>or</td><td></td><td></td><td>or</td><td>or</td><td></td><td>or</td><td></td><td>Idaho</td></tr><tr><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Idaho</td><td></td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td><td></td><td>Transfer</td></tr><tr><td>Transfer ?</td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transf</td><td></td><td>Transfer</td><td></td><td>Transfer</td><td></td><td>Transfer ?</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>？</td><td></td><td>?</td><td></td><td></td><td>?</td><td>?</td><td></td><td></td><td>?</td><td></td></tr><tr><td></td><td></td><td></td><td></td><td>S</td><td></td><td>S</td><td></td><td></td><td>S</td><td>S</td><td></td><td>S</td><td></td><td>S</td></tr><tr><td></td><td></td><td></td><td></td><td>oh</td><td></td><td>oh</td><td></td><td></td><td>oh</td><td>oh</td><td></td><td>oh</td><td>oh</td></tr><tr><td></td><td></td><td></td><td></td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td><td>rab</td><td></td><td>rab</td><td></td><td>rab</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>Modi</td><td>Modi</td><td></td><td></td><td>Modi</td></tr><tr><td></td><td></td><td></td><td>!pow</td><td></td><td>Modi</td><td></td><td>is</td><td></td><td></td><td>Modli</td><td></td></tr><tr><td></td><td></td><td></td><td>is</td><td></td><td></td><td></td><td></td><td></td><td></td><td>Is</td><td>is</td></tr><tr><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td></td><td>director of</td><td>director of</td><td></td><td>drector</td><td>drector</td></tr><tr><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td></td><td>Kh</td><td>Kh</td><td></td><td>of Kh</td><td>of 
Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>Kh</td><td></td><td>oon</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td>uoo</td><td></td><td>Ka</td><td>Ka</td><td></td><td>Ka</td><td>Ka</td></tr><tr><td></td><td></td><td></td><td>Ka</td><td></td><td>Ka Kh</td><td></td><td>Kh</td><td>Kh</td><td></td><td>Kh</td><td>Kh</td></tr><tr><td></td><td></td><td></td><td>Kh</td><td></td><td>oon</td><td></td><td>uoo</td><td>oon</td><td></td><td>oon</td><td>oon</td></tr><tr><td></td><td></td><td></td><td>uoo</td><td></td><td></td><td></td><td>Peter</td><td>Peter</td><td></td><td>Peter</td><td>Peter</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>F</td><td>F</td><td></td><td>F</td><td>F</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>onda</td><td>onda</td><td>onda</td><td></td><td>onda</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>is</td><td></td><td>is</td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td>director</td><td></td><td>director</td><td>director</td><td></td><td>director</td></tr><tr><td>PeterFondaisdirector of IdahoTransfer,</td><td></td><td></td><td></td><td></td><td></td><td>of</td><td></td><td>of</td><td>of</td><td></td><td>of</td></tr><tr><td>2November1897isdateof birthof SohrabModi, February 23, 1940 is date of birth of Peter Fonda]</td><td></td><td></td><td></td><td></td><td></td><td>Idaho</td><td></td><td>Idaho</td><td>Idaho</td><td></td><td>Idaho</td></tr><tr><td>Cq: [ Sohrab Modi is director of Khoon Ka Khoon,</td><td></td><td></td><td></td><td></td><td></td><td>Transfer</td><td></td><td>Transfer</td><td>Transfer November 1897 date birth Modi</td><td>2 is of of S oh rab</td><td>Transfer 2 November 1897 date of birth of S oh Modi</td></tr><tr><td>q: Which film whose director is younger, Khoon Ka Khoon or Idaho 
Transfer?</td><td></td><td></td><td></td><td></td><td></td><td></td></table></body></html>', 'reference': '[9] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. 
,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. 
,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. 
reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. 
We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[1] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 2: {'id': 2, 'title': 'ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness', 'content': '# Acknowledgements\\nWe thank the reviewers and the area chairs for their helpful comments. We also thank Peter Hase, Prateek Yadav, and Shiyue Zhang for their feedback. This work was supported by NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, and a Google Ph.D. Fellowship. The views contained in this article are those of the authors and not of the funding agency.\\n\\n# Limitations\\nAn interesting assumption for future work to address is that all knowledge typically needed to evaluate the correctness of a reasoning step is explicitly present as part of the input or the intermediate reasoning steps. In scenarios where correctness depends on implicit knowledge, we rely on the choice of underlying models (described in Appendix A )which are built on top of pre-trained LMs and are known to capture a lot of background knowledge ( Petroni et al. ,2019 ;Roberts et al. ,2020 ). However, inferences that rely on substantial implicit knowledge may not be best evaluated through current metrics. While current evaluation frameworks focus on evaluating the quality of modelgenerated reasoning chains, Wei et al. (2022 ) note that the chain itself may not faithfully reflect the internal reasoning process of the model. This remains an open question for future work to address.', 'reference': '[2] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. 
However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  \\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et 
al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. 
Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. ,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. 
,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   \\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 2: {'id': 2, 'title': 'Complexity-Based Prompting for Multi-Step Reasoning', 'content': '# 5 CONCLUSION\\nThis paper proposes a new complexity-based instance selection scheme for prompting language models to perform multi-step reasoning. In addition to substantial performance improvements on math word reasoning tasks, our methods exhibit multiple advantages such as being intuitive, annotation-efficient, and robustly effective in different in-context learning settings. We hope this work will open new research possibilities in prompting, language models, and multi-step reasoning.\\n\\n\\n\\n# A A PPENDIX\\nYou may include other additional sections here.', 'reference': '[2] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:08\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m111\u001B[0m - \u001B[37m查询结果: {0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models', 'content': '# 6 CONCLUSIONS\\nExisting prompting approaches for LLM reasoning cannot leverage the insights of solving similar problems and suffer from accumulated errors in multi-step reasoning, due to reasoning from scratch. To address these issues, we propose Thought Propagation (TP), which explores analogous problems to yield a refined solution or a knowledge-intensive plan in an analogical approach to facilitate new problem-solving. TP is compatible with existing prompting methods, showing plug-and-play generalization and enhancement to a wide range of tasks such as Shortest-path Planning, Creative Writing, and LLM-Agent Planning. Future directions would further enhance the performance and efficiency of the proposed framework.', 'reference': '[1] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6'}, 2: {'id': 2, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. (2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. 
,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. ,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. 
In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. (2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . 
T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. As identified by Wei et al. (2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  
\\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. (2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[2] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}, 3: {'id': 3, 'title': 'Exploring Chain-of-Thought Style Prompting for Text-to-SQL', 'content': '# 2 Related Work\\nLarge Language Models and Prompting. As large language models (LLMs) advance ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ), in-context learning emerged as a new paradigm in natural language processing ( Liu et al. ,2023b ). Although LLMs can achieve outstanding performance by prompting them with few-shot examples in context, they struggle with tasks that require complex reasoning. As a solution, Wei et al. (2022b ) proposed chain-of-thought prompting. By explicitly describing intermediate reasoning steps to answer a complex question in the prompts, chain-of-thought prompting improves the accuracy of LLMs by a large margin across several natural language reasoning tasks. However, chain-of-thought prompting has a key limitation, where it often performs poorly on tasks that require generalization of solving problems harder than the demonstration examples, such as compositional generalization ( Zhou et al. ,2023 ). Our work systematically explores chain-of-thought style prompting methods for the text-to-SQL parsing task. Additionally, we propose a new chain-ofthought style prompting method that guides LLMs to perform complex reasoning via question decomposition. We show that text-to-SQL parsing indeed requires multi-step reasoning, and chain-of-thought style prompting can help LLMs to achieve higher parsing accuracy.  \\n\\nQuestion Decomposition. 
Question decomposition is a method that facilitates QA models by converting a complex problem into a sequence of simpler subquestions ( Gupta and Lewis ,2018 ;Min et al. ,2019 ). In light of question decomposition, Zhou et al. (2023 ) proposed Least-to-Most prompting to solve complex problems with better compositional generalization in two stages. The method first prompts LLMs to generate a list of subquestions as a decomposition of the given problem. Then, it uses the subquestions to guide LLMs to incrementally solve each of them and derive a correct final answer. Our work is related to Wolfson et al. ,2020 ,2022 , which applies question decomposition to text-to-SQL parsing, but we explore question decomposition for text-to-SQL parsing under in-context learning context and propose to leverage question decomposition as a novel chainof-thought style prompting. We conduct comprehensive experiments and show that our question decomposition prompting outperforms the two widely used methods, chain-of-thought prompting and least-to-most prompting, on several text-to-SQL datasets.  \\n\\nText-to-SQL Semantic Parsing. Text-to-SQL semantic parsing has long been studied to build natural language interfaces for database applications ( Dahl et al. ,1994 ;Zelle and Mooney ,1996 ). Since the release of Spider ( Yu et al. ,2018 ), a crossdatabase text-to-SQL benchmark, many parsers have been developed on top of language models to better understand various database schemas (Wang et al. ,2020 ;Yu et al. ,2021 ;Deng et al. ,2021 ). Recent work starts to explore the potential of LLMs, such as Codex ( Chen et al. ,2021 ), in textto-SQL parsing by including database schemas in the prompts ( Rajkumar et al. ,2022 ), retrieving similar questions as few-shot examples ( Hongjin et al. ,2023 ), or reranking SQL parses with their execution results ( Ni et al. ,2023 ). 
Our work is in parallel with these methods and extends this line by teaching LLMs to become a better text-to-SQL parser by itself without additional engineering efforts or introducing new modules. With our question decomposition prompting, an LLM, such as Codex in our experiments, can effectively learn to decompose natural language questions and predict table and column names (Section 3 ) incrementally in each step with a few in-context examples.\\n\\n# 3 Prompting for Multi-Step Reasoning in Text-to-SQL\\nIn this section, we outline three prompting methods to guide an LLM to progressively derive a sequence of reasoning steps and then generate the target SQL query. We first describe how we adopt chain-of-thought and least-to-most prompting for text-to-SQL parsing. Moreover, we introduce a new prompting method, question decomposition prompting ( QDecomp ) and its variant ( QDecomp $^+$ InterCOL ). Figure 1 demonstrates different prompting methods and more examples are provided in Appendix A . For all experiments, we use Codex ( Chen et al. ,2021 ), code-davinci-002 , as the LLM. The experiments were conducted between January and March 2023 through OpenAI $\\\\mathrm{API}^{2}$ , using greedy decoding with temperature 0.\\n\\n# 3.1 Chain-of-Thought Prompting\\nChain-of-thought prompting ( Wei et al. ,2022b )aims to improve LLMs’ reasoning ability by generating a coherent series of intermediate steps before predicting the final answer. For text-to-SQL parsing, one challenge is how to come up with intermediate reasoning steps. We are inspired by the logical execution process of SQL queries, as adopted in Narechania et al. (2021 ) to construct an interactive natural language interface. 
For the SQL query in Figure 1 (a), it has a logical execution order of FROM , followed by WHERE , and then SELECT .Following the execution order, we put together a natural language description of all clauses as the intermediate reasoning steps for the in-context examples in CoT, as shown in Figure 1 (a).', 'reference': '[3] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2'}, 4: {'id': 4, 'title': 'Self-Consistency Improves Chain of Thought Reasoning in Language Models', 'content': '# Self-Consistency Improves Chain of Thought Reasoning in Language Models\\nXuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed H.Chi, Denny Zhou Google Research, Brain Team {xuezhiw, jasonwei, schuurmans, qvl, edchi, dennyzhou}@google.com\\n\\n# Abstract\\nWe explore a simple ensemble strategy, self-consistency , that significantly improves the reasoning accuracy of large language models. The idea is to sample a diverse set of outputs from a language model and return the most consistent answer in the set. Such ensembling method improves reasoning accuracy when combined with chain of thought prompting. For arithmetic and commonsense reasoning benchmarks we find that self-consistency yields significant accuracy improvements in a variety of datasets, such as GSM8K $(+10\\\\%)$ , SVAMP $(+14\\\\%)$ , MultiArith $(+24\\\\%)$ , CommonsenseQA $(+5\\\\%)$ and ARC (easy $+4\\\\%$ , challenge $+5\\\\%$ ).\\n\\n# 1 Introduction\\nAlthough language models have demonstrated remarkable success across a range of NLP tasks, their ability to demonstrate reasoning is often seen as a limitation, which cannot be overcome solely by increasing model scale ( Rae et al. ,2021 ;BIG-bench collaboration ,2021 ,inter alia ). In response, Wei et al. (2022 ) have proposed chain of thought prompting , which prompts language models to generate a series of short sentences that mimic the reasoning process a person might employ. For example, given the question “Shawn has five toys. 
He gets two more each from his mom and dad. How many does he have now?” , instead of directly responding with “9” , we could prompt a language model to respond with “Shawn started with 5 toys. 2 toys each from his mom and dad is 4 more toys. The final answer is $5+4{=}9.$ ”. Chain of thought prompting has been shown to significantly improve language model performance in a variety of multi-step reasoning tasks ( Wei et al. ,2022 ).  \\n\\nIn this paper, we introduce a simple method, self-consistency , that further improves the accuracy of chain of thought reasoning, often by a significant margin. Self-consistency leverages the intuition that complex reasoning tasks typically admit multiple reasoning paths that reach a correct answer (Stanovich & West ,2000 ). The more a reasoning task requires deliberate thinking and analysis (Evans ,2010 ), the greater the diversity of reasoning paths that can recover the answer. The method we propose first prompts the language model with example chains of thought, then generates a diverse set of reasoning paths by sampling from the model’s decoder. Each reasoning path might lead to a different final answer, so we determine the optimal answer by taking a plurality or majority vote—i.e., the most commonly occurring answer (corresponding to a majority vote in the special case of only two alternatives). This approach is analogous to human experience that if multiple reasoning paths lead to the same answer, we have greater confidence that the final answer is correct. Figure 1 illustrates the self-consistency method with an example.  \\n\\n  \\nFigure 1: The self-consistency method contains three steps: (1) prompt a language model using example chains of thought; (2) sample from the language model’s decoder to generate a diverse set of reasoning paths; and (3) choose the most consistent answer using the majority/plurality vote.  
\\n\\nThe self-consistency method is far simpler than previous approaches, which either train an additional verifier ( Cobbe et al. ,2021 ), or train a re-ranker given additional human annotations to improve generation quality ( Thoppilan et al. ,2022 ). By contrast, our approach is entirely unsupervised , works off-the-shelf with pre-trained language models, requires no additional human annotation, and avoids any additional training or fine-tuning.  \\n\\nWe evaluate self-consistency on a range of arithmetic reasoning and commonsense reasoning tasks, and find that it improves the reasoning ability of language models by a striking margin. Compared to generating a single chain of thought via greedy decoding ( Wei et al. ,2022 ), self-consistency contributes additional absolute improvements of $+10.6\\\\%$ on the recent grade-school-math dataset (GSM8K; Cobbe et al. ,2021 ), $+14.4\\\\%$ on a recently-compiled challenge dataset over math word problems (SVAMP; Patel et al. ,2021 ), and $+23.9\\\\%$ on MultiArith ( Roy & Roth ,2015 ). For commonsense reasoning, we also observe significant gains in CommonsenseQA ( Talmor et al. ,2019 )$(+5\\\\%)$ ,and the AI2 Reasoning Challenge (ARC) dataset ( Clark et al. ,2018 ), with $+4\\\\%$ and $+4.7\\\\%$ absolute accuracy improvement in the easy and challenge sets, respectively. In additional experiments, we also evaluate self-consistency on alternative large language models, compare against other sampling strategies, and perform ablations on various aspects of the method.\\n\\n# 2 Self-Consistency over Diverse Reasoning Paths\\nA feature of humanity is that people think differently. It is natural to posit that in tasks requiring deliberate thinking, there are likely several ways to attack the problem, all of which lead to the same answer. We propose that such a process can be simulated in language models via sampling from the language model’s decoder. 
For instance, as shown in Table 1 , a model can generate several plausible responses to a math question that all arrive at the same correct answer (Outputs 2, 4, and 5). Since language models are not perfect reasoners, the model might also produce an incorrect reasoning path or make a mistake in one of the reasoning steps (e.g., in Output 1 and 3), but such solutions are less likely to arrive at the same answer ( 26 and 14 in Table 1 ). That is, we hypothesize that correct reasoning processes, even if they are diverse, tend to have greater agreement in their final answer than incorrect processes.  \\n\\nWe leverage this intuition by proposing the following self-consistency method. First, a language model is prompted with a set of manually written chain of thought exemplars ( Wei et al. ,2022 ). Next, we sample a set of candidate outputs from the language model’s decoder ( Ackley et al. ,1985 ;Ficler & Goldberg ,2017 ;Fan et al. ,2018 ;Holtzman et al. ,2018 ;Radford et al. ,2019 ;Holtzman et al. ,2020 ), which produces diversity in the set of generated reasoning paths. Finally we ensemble the results by choosing the answer that is the most consistent among the generated answers. In an experimental investigation, we show that combining chain of thought prompting with self-consistency leads to substantially better results than simply using chain of thought alone with a single generated path.  \\n\\nmore detail, assum $a^{*}$ is drawn from a fixed answer set, $a^{*}\\\\in A$ . We sample $m$ candidate output $\\\\{r_{i},a_{i}\\\\},i\\\\in{1,\\\\dots,m}$ {}∈from e decoder, where each generated output consists of a reasoning path $r_{i}$ , along with a final answer $a_{i}$ . As an example, consider Output 5 from Table 1 :the first few sentences “ She eats 3 for breakfast every morning ... So she has $9\\\\,e g g s*\\\\mathcal{S}2=\\\\mathcal{S}I\\\\mathcal{S}.$ .”  \\n\\nQuestion :Janet’s ducks lay 16 eggs per day. 
She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. How much in dollars does she make every day at the farmers’ market? (Ground truth answer is 18 .)  \\n\\nGreedy Decode : She eats 3 for breakfast every morning and bakes muffins for her friends every day with 4. This means she eats $3+4=7$ eggs every day. She sells the remainder at the farmers’ market daily for $\\\\mathbb{S}2$ per fresh duck egg. So she sells $7\\\\ ^{*}\\\\mathbb{S}2=\\\\mathbb{S}14$ per day. The answer is 14 .', 'reference': '[4] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0'}, 5: {'id': 5, 'title': 'Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models', 'content': '# Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\\nBilgehan Sel 1 , Ahmad Al-Tawaha 1 , Vanshaj Khattar 1 , Lu Wang 2 , Ruoxi Jia 1 and Ming Jin 1 1 Virginia Tech 2 Microsoft\\n\\n# Abstract\\nCurrent literature, aiming to surpass the “Chain-of-Thought” approach, often resorts to an external modus operandi involving halting, modifying, and then resuming the generation process to boost Large Language Models’ (LLMs) reasoning capacities. This mode escalates the number of query requests, leading to increased costs, memory, and computational overheads. Addressing this, we propose the Algorithm of Thoughts —a novel strategy that propels LLMs through algorithmic reasoning pathways, pioneering a new mode of in-context learning. By employing algorithmic examples, we exploit the innate recurrence dynamics of LLMs, expanding their idea exploration with merely one or a few queries. Our technique outperforms earlier single-query methods and stands on par with a recent multi-query strategy that employs an extensive tree search algorithm. 
Intriguingly, our results suggest that instructing an LLM using an algorithm can lead to performance surpassing that of the algorithm itself, hinting at LLM’s inherent ability to weave its intuition into optimized searches. We probe into the underpinnings of our method’s efficacy and its nuances in application.', 'reference': '[5] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'}, 6: {'id': 6, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  
\\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. 
(2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. For each compared method, we consider both iterative and non-iterative settings. 
Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[6] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 7: {'id': 7, 'title': 'Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution.', 'content': '# 2 RELATED WORK\\nPrompting an LLM in the right way is essential to its downstream performance ( Moradi & Samwald ,2021 ;Madaan & Yazdanbakhsh ,2022 ;Zhou et al. ,2023 ). Indeed, even the order in which prompts are presented can heavily influence LLM performance ( Lu et al. ,2022 ). A number of recent works have focused on devising better prompt strategies, or even automating such prompt engineering.  \\n\\nPrompting : Chain-of-Thought Prompting (CoT, Wei et al. ,2022 ) is a popular prompt strategy which provides intermediate reasoning steps as few-shot prompts to an LLM, thereby significantly improving its arithmetic, commonsense, and symbolic reasoning abilities. Notably, the gains of CoT are more pronounced for stronger LLMs. This is intriguing, as it points to the possibility of increasingly capable (and potentially open-ended) self-improving mechanisms on top of adept LLMs—a hypothesis that Promptbreeder directly builds upon. Instead of few-shot CoT prompting, Kojima et al. (2022 ) demonstrate that LLMs can also be prompted zero-shot (e.g. \"Let’s think step by step\" ) to produce their own chains of thoughts (Zero-shot CoT) that improve reasoning abilities. Self-Consistency (CoT-SC, Wang et al. ,2022 ) extends CoT by sampling a diverse set of workings out and selecting the most consistent answer. Tree of Thoughts (ToT, Yao et al. ,2023 ) generalizes CoT to multiple workings out that can be expanded or backtracked from. Graph of Thoughts (GoT, Besta et al. 
,2023 ) is a further generalization to arbitrary graph structures. Plan-and-Solve Prompting (PS, Wang et al. ,2023b ) encourages an LLM to first devise a plan to solve a problem before attempting to solve it. Similarly, Least-to-Most Prompting ( Zhou et al. ,2022 ) encourages an LLM to decompose a problem into subparts, and then to solve each part individually before synthesizing an answer. Self-Refine ( Madaan et al. ,2023 ) prompts an LLM to generate a response, to provide feedback on the response, and to finally refine the solution.  \\n\\nIn contrast to gradient-free approaches above, Soft Prompting approaches (e.g., Liu et al. ,2021 ;Qin & Eisner ,2021 ;Lester et al. ,2021 ) directly fine-tune continuous prompt representations. Huang et al. (2022 ) use CoT and CoT-SC on an unlabelled dataset of questions, and subsequently finetune an LLM based on generated solutions. Similarly, Zelikman et al. (2022 ) uses CoT to generate rationales and fine-tunes the LLM based on those examples and rationales that yielded the correct answer. However, as argued by Zhou et al. (2023 ), any approach that updates all or a portion of LLM parameters will not scale as models get bigger and, moreover, will not work with the increasing number of LLMs hidden behind an API.  \\n\\nAll of the prompt engineering approaches above are domain agnostic but hand designed. Central to our work is the hypothesis that we could do better by employing an automated self-improvement process that can adapt prompts to a domain at hand. Auto-CoT ( Zhang et al. ,2023b ) and AutomaticCoT ( Shum et al. ,2023 ) automatically find reasoning chains for Few-Shot CoT. Automatic Prompt Engineer (APE, Zhou et al. ,2023 ) uses one generator-prompt to generate prompt candidates, and another mutation-prompt to mutate them. 
In contrast to APE, our work performs compositional task-specific initialization of mutation-prompts, subsequent online mutation of mutation-prompts, uses special mutation operators that take into account the whole population and elite history, and uses diversity-maintenance methods—all of which help avoid the problem of diminishing returns and diversity loss suffered by APE.  \\n\\nConcurrently to our work, Yang et al. (2023a ) developed Optimization by PROmpting (OPRO), a prompt optimization method that varies prompts using a single complex mutation prompt, and evaluates newly generated prompts on a small fixed training set of problems. In contrast, Promptbreeder autonomously evolves multiple LLM generated mutation-prompts as well as task-prompts, and evaluates fitness on random subsets from the whole training set during evolution. At the time of its release, OPRO achieved a score of $80.2\\\\%$ via the optimized zero-shot prompt \"Take a deep breath and work on this problem step-by-step\" on GSM8K. Promptbreeder surpasses this with $83.9\\\\%$ in the zero-shot setting with the unintuitively simple prompt \"SOLUTION\"\" —further evidence for the sensitivity of LLMs to prompts and the importance on finding effective prompts automatically. Also concurrently to our work, Guo et al. (2023 ) developed EvoPrompt, which uses a fixed mutation (and crossover) prompt, as well as a prompt that asks for a mutant of the difference between two parent prompts, to produce offspring prompts. EvoPrompt is initialized with a whole population of initial hand-designed task tailored prompts rather than a single problem description as we do. In contrast to the two approaches above, Promptbreeder uses LLMs to self-referentially improve mutation-prompts, and it is able to evolve contexts as well.  
\\n\\nSelf-Referential Self-Improvement : Developing an open-ended system that can improve itself as well as improving the way it is improving itself ( Schmidhuber ,1993 ;2003 ) is a long-standing open problem in AI research. Schmidhuber (1993 ) introduced an “introspective” neural network with a self-referential weight matrix that can modify its own weights and, thus, also modify those weights that are governing how its own weights are modified. Recently, Irie et al. (2022 ) proposed a more scalable self-referential weight matrix taking inspiration from fast weight programmers ( Schmidhuber ,1992 ). Kirsch & Schmidhuber (2022 ) propose a self-referential meta-learning approach, combining self-referential weight matrices with ideas from G¨odel Machines ( Schmidhuber ,2003 ), i.e., to allocate more computational resources to better performing solutions. However, since these approaches directly modify parameters of a model, it is unclear how to scale them to the increasing number of parameters in modern LLMs. In contrast, for Promptbreeder the substrate of selfreferential self-improvement is natural language, avoiding costly parameter updates altogether.  \\n\\nOpen-Endedness and LLMs : Promptbreeder makes use of the observation by Lehman et al. (2022 ), Meyerson et al. (2023 ) and Chen et al. (2023 ) that LLMs are effective at generating mutations from examples. In addition, LLMs encode human notions of interestingness and can be used to automatically quantify novelty ( Zhang et al. ,2023a ). Promptbreeder is related to Picbreeder ( Secretan et al. ,2008 ), an open-ended human-in-the-loop system that evolves increasingly interesting images. While Picbreeder explores the space of images, Promptbreeder explores the space of prompts and does so without humans in the loop. As Promptbreeder is proposing mutated prompts to itself, it is an example of a system transitioning from “learning from data” to “learning what data to learn from” ( Jiang et al. 
,2022 ).', 'reference': '[7] Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution., ICML, 2024, chunk 2'}, 8: {'id': 8, 'title': 'A Survey of Deep Learning for Mathematical Reasoning', 'content': '# 5.2 High-quality Reasoning Chains\\nEarly chain of thought work (e.g., Wei et al. (2022 )) mainly relies on a single human-annotated reasoning chain as a prompt. However, manually creating reasoning chains has two disadvantages. First, as tasks become more complex, current models may not be sufficient to learn to perform all necessary reasoning steps and cannot easily generalize to different tasks. Second, a single decoding process is vulnerable to incorrect inference steps, leading to an incorrect prediction as the final answer. To address this limitation, recent studies mainly focus on two aspects, (i) hand-crafting more complex demonstrations, which we refer to as process-based approaches (Zhou et al. ,2022 ;Chen et al. ,2022b ), (ii) leveraging ensemble-like methods, which we refer to as outcome-based approaches (Wang et al. ,2022 ;Li et al. ,2022a ).  
\\n\\n<html><body><table><tr><td>Models</td><td>Engine (best performed)</td><td>ICL source</td><td>Rationale type</td><td>Rationale source</td><td>Postmethod</td></tr><tr><td>Few-shot-CoT (Weietal.,2022)</td><td>PaLM (540B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Self-Consistency-CoT (Wang et al., 2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Least-to-most CoT(Zhou et al.,2022)</td><td>Codex (175B)</td><td>Random</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Retrieval-CoT (Zhang et al., 2022b)</td><td>GPT-3 (175B)</td><td>Retrival</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>PromptPG-CoT (Lu et al.,2022b)</td><td>GPT-3 (175B)</td><td>RL</td><td>Language</td><td>Hand-crafted</td><td></td></tr><tr><td>Auto-CoT (Zhang et al.,2022b)</td><td>Codex (175B)</td><td>Clustering</td><td>Language</td><td>Auto-generated</td><td></td></tr><tr><td>Complexity-CoT (Fu et al.,2022)</td><td>GPT-3( (175B)</td><td>Complexity</td><td>Language</td><td>Hand-crafted</td><td>Self-consistency</td></tr><tr><td>Few-shot-PoT(Chen et al.,2022b)</td><td>GPT-3(175B)</td><td>Random</td><td>Code</td><td>Hand-crafted</td><td></td></tr></table></body></html>\\n\\nTable 6: In-context learning with large language models for mathematical reasoning. For GPT-3, all papers use the text -davinci -002 version; for Codex, all papers use the code -davinci -002 . RL is short for reinforcement learning.  \\n\\nProcess-based approaches aim to improve the chain-of-thought reasoning quality, especially for complex reasoning tasks. In least-to-most prompting ( Zhou et al. ,2022 ), the problem-solving process is implemented through two-stage prompting: (i) reducing a complex problem into a list of subproblems; (ii) solving these sub-problems sequentially, so that solving a given sub-problem is facilitated by the answers to previously solved subproblems. 
Similarly, Khot et al. (2022 ) leverage diverse decomposition structures and use different prompts to answer each sub-question. Apart from these multi-step reasoning methods, Chen et al. (2022b ); Gao et al. (2022 ) propose programof-thoughts (PoT), an alternative solution that uses large language models to express the reasoning process as a program. The computation is then relegated to an external computer, which executes the generated programs to derive the answer.  \\n\\nOutcome-based approaches acknowledge the potential incorrectness of an individual reasoning path, and instead use multiple reasoning paths ( Wang et al. ,2022 ;Li et al. ,2022a ). Selfconsistency ( Wang et al. ,2022 ) generates a set of reasoning paths by sampling from the language model, and marginalizes out the reasoning paths by choosing the most common answer. In addition to using sampling with a single prompt to produce multiple reasoning paths, Li et al. (2022a ) propose to introduce diverse prompts through “self teaching”, as a complementary solution to produce a higher degree of diversity.\\n\\n# 6 Discussion\\n\\n# 6.1 Analysis of Benchmarks\\nMulti-modal setting. Most existing benchmarks for mathematical reasoning have targeted the textual-only modality. However, visual elements can provide a rich source of quantitative information, making multi-modal datasets beneficial for reasoning over quantitative relations in natural images ( Lu et al. ,2022a ), abstract diagrams ( Lu et al. ,2021b ), figures ( Kahou et al. ,2017 ), and charts (Kafle et al. ,2018 ). Tables, which are commonly found in daily documents and contain hierarchically structured information, have also been the focus of tasks that require quantitative reasoning over textual and tabular context ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ;Lu et al. ,2022b ). In addition, recent datasets have been developed for mathematical reasoning grounded on conversations (Sun et al. ,2019 ;Zhang et al. ,2021 ;Chen et al. 
,2022c ), as well as reports ( Chen et al. ,2022c ).  \\n\\nLow-resource setting. Despite the creation of various datasets, mathematical reasoning in lowresource settings remains largely under-explored. Pioneering research has developed mathematical reasoning benchmarks for financial ( Chen et al. ,2021c ;Zhu et al. ,2021 ;Zhao et al. ,2022 ) and scientific domains ( Lu et al. ,2022a ). Additionally, there have been attempts to build non-English datasets for Chinese ( Wang et al. ,2017 ;Qin et al. ,2020 ;Yu et al. ,2021a ) and Arabic ( Alghamdi et al. ,2022 ) for mathematical reasoning.  \\n\\nRationale annotations. Complex reasoning usually involves multiple steps to arrive at the final answer. To bridge this gap, datasets annotated with intermediate rationales such as logic forms ( Tafjord et al. ,2019 ;Lu et al. ,2021a ), programs ( Amini et al. ,2019 ;Chen et al. ,2021c ,a ;Cao and Xiao ,2022 ;Chen et al. ,2022a ), and reasoning graphs (Zhang et al. ,2021 ) have been proposed to train models for complex reasoning tasks. Python programs are used as reasoning annotations in ( Austin et al. ,2021 ;Mishra et al. ,2022a ) due to their enhanced accessibility and readability. To imitate the reasoning process of a human, a more recent trend is to annotate solutions in natural language ( Ling et al. ,2017 ;Cobbe et al. ,2021 ;Lu et al. ,2022b ;Hendrycks et al. ,2021 ;Lu et al. ,2022a ).  \\n\\nTable 7: Language models struggle with large numbers.   
\\n\\n\\n<html><body><table><tr><td></td><td>T5 (Large)</td><td>UnifiedQA (Large)</td><td>GPT-3 (davinci-002)(davinci-003)</td><td>GPT-3</td></tr><tr><td>3balls+5balls=</td><td></td><td>5balls</td><td>8balls</td><td>8balls</td></tr><tr><td>23balls+145balls=</td><td></td><td></td><td>58balls</td><td>168balls</td></tr><tr><td>23balls+1,855balls=</td><td>x</td><td></td><td>2,878balls</td><td>2,988balls</td></tr></table></body></html>', 'reference': '[8] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6'}, 9: {'id': 9, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  \\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. 
,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  
\\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\\n\\n\\n\\n# A Crowdworking details\\nA screenshot of the interface we use to collect the pairwise human judgments from $\\\\S3.1.1$ is given in Figure 8 . 
We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\\\mathbb{S}15/\\\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \\n\\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\\n#', 'reference': '[9] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}}\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:17\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 文章结构安排\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:17\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"本文旨在全面综述思维链提示方法的当前研究现状，结构安排如下：首先，在引言部分，我们将介绍研究背景与意义，概述思维链提示方法的基本概念，并详细说明本文的结构安排。接着，在第二部分，我们将深入探讨思维链提示方法的基本原理，包括其定义、工作原理、设计原则以及主要应用场景。随后，第三部分将重点介绍思维链提示方法的最新进展，涵盖基于教育理论的策略<sup>1</sup>、思维链微调方法<sup>2</sup>、在文本到SQL解析<sup>3</sup>、知识库问题生成<sup>4</sup>、自然语言理解任务<sup>5</sup>、多领域自然语言理解任务<sup>6</sup>、复杂低级控制任务<sup>7</sup>、自动思维链提示推理方法<sup>8</sup>、后门攻击方法<sup>9</sup>以及基于大型语言模型信号的多模态思维链推理教学方法<sup>10</sup>等方面的应用。第四部分将分析思维链提示方法面临的挑战与未来研究方向，探讨其局限性<sup>11</sup>、改进策略<sup>12</sup>以及潜在的未来研究方向<sup>13</sup>。最后，在结论部分，我们将总结思维链提示方法的研究成果，探讨其潜在影响，并对未来研究进行展望。通过这种结构安排，我们希望能够为读者提供一个系统、全面的思维链提示方法研究现状的视角。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:17\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['3', '1', '4', '8', '2', '10', '12', '11', '13', '6', '5', '9', '7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:17\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[3] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6', '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[4] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[2] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:18\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的研究总结\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:18\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示方法作为一种新兴的研究范式，近年来在人工智能领域引起了广泛关注。该方法通过引导预训练语言模型（PLMs）逐步展开多步推理，从而解决复杂任务。研究表明，传统的PLMs虽然在内部积累了大量世界知识，但在处理需要复杂多步推理的任务时，往往难以有效调用这些知识<sup>0</sup>。思维链提示方法通过迭代式地激发PLMs中的相关知识，显著提升了模型在多步推理任务中的表现<sup>0</sup>。\n",
      "\n",
      "在基本原理方面，思维链提示方法的核心在于其动态合成提示的能力，能够根据当前推理步骤的上下文条件生成合适的提示，从而逐步引导模型完成复杂任务<sup>0</sup>。这种方法不仅克服了传统提示方法在处理多步推理任务时的局限性，还展示了在多种应用场景中的广泛适用性，包括文本到SQL解析、知识库问题生成以及自然语言理解任务等。\n",
      "\n",
      "最新的研究进展进一步拓展了思维链提示方法的应用范围。基于教育理论的思维链提示策略、思维链微调方法以及多模态思维链推理教学方法等，都为提升PLMs的推理能力提供了新的思路<sup>0</sup>。特别是在复杂低级控制任务中，思维链提示方法的应用展示了其在跨领域任务中的潜力。\n",
      "\n",
      "然而，尽管取得了显著进展，思维链提示方法仍面临诸多挑战。例如，如何进一步提高提示的动态适应性和上下文敏感性，以及如何有效应对后门攻击等问题，仍是未来研究需要重点关注的方向<sup>0</sup>。此外，现有研究多集中于单一领域或特定任务，缺乏对多领域自然语言理解任务的系统性探索。\n",
      "\n",
      "综上所述，思维链提示方法在提升PLMs推理能力方面展现了巨大潜力，但仍需在理论深化、方法优化和应用拓展等方面进行深入研究。未来的研究应着重于解决现有方法的局限性，探索更广泛的应用场景，并加强对多领域任务的系统性研究，以期实现更全面、高效的思维链提示框架。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:18\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:18\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的未来研究方向\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示方法的未来研究方向主要集中在以下几个方面：首先，提升模型在复杂推理任务中的泛化能力是一个重要方向。现有的思维链提示方法在处理复杂问题时，往往依赖于特定的演示示例，导致模型在遇到未见过的任务时表现不佳<sup>2</sup>。未来研究可以探索如何通过更通用的提示策略，使模型能够更好地适应多样化的推理任务<sup>4</sup>。其次，改进思维链提示的自动生成机制也是一个关键领域。目前，许多方法依赖于手工构建的推理链，这不仅耗时且难以扩展<sup>1</sup>。未来可以研究如何利用自动化技术，如基于深度学习的生成模型，来高效地生成高质量的思维链提示<sup>5</sup>。此外，多模态思维链推理也是一个值得探索的方向。现有的研究主要集中在文本模态，而结合图像、音频等多模态信息可以显著提升模型的推理能力<sup>4</sup>。例如，在处理涉及图像内容的推理任务时，结合视觉信息可以有效提高推理的准确性和鲁棒性。最后，思维链提示方法在特定领域的应用也需要进一步拓展。例如，在医疗诊断、法律咨询等专业领域，思维链提示方法可以辅助专业人士进行复杂决策<sup>3</sup>。通过结合领域知识和思维链提示，可以开发出更加智能的辅助决策系统。综上所述，思维链提示方法的未来研究方向包括提升泛化能力、自动化生成机制、多模态推理以及特定领域的应用拓展，这些方向将为思维链提示方法的发展带来新的机遇和挑战。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['3', '1', '4', '2', '5']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[3] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7', '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6', '[4] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1', '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[5] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示的定义与工作原理\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示（Chain of Thought Prompting）是一种用于增强人工智能模型推理能力的方法，通过引导模型生成中间推理步骤，从而提高其解决问题的准确性和可解释性。其核心思想是模拟人类的思维过程，使模型在得出最终答案之前，能够逐步展示其推理路径。思维链提示的定义可以概括为：在给定任务输入时，通过设计特定的提示语，引导模型生成一系列逻辑上连贯的中间推理步骤，最终得出结论<sup>0</sup>。\\n\\n思维链提示的工作原理主要基于大型语言模型的生成能力。具体而言，它通过以下步骤实现：首先，设计一个包含任务描述和示例的提示语，这些示例通常包含完整的推理过程；其次，将提示语输入到语言模型中，模型基于提示语生成相应的推理步骤；最后，模型根据生成的推理步骤得出最终答案。这一过程不仅使模型的推理过程更加透明，还能够在一定程度上纠正模型在推理过程中可能出现的错误<sup>0</sup>。\\n\\n例如，在数学推理任务中，思维链提示可以通过提供一系列类似的数学问题和其详细的解题步骤，引导模型在解决新问题时，生成类似的推理过程。这种方法在处理复杂问题时尤为有效，因为它允许模型将复杂问题分解为多个子问题，逐步解决，从而提高整体推理的准确性<sup>0</sup>。\\n\\n思维链提示的设计关键在于提示语的质量和多样性。高质量的提示语能够更有效地引导模型生成合理的推理步骤，而多样化的提示语则有助于提高模型在不同任务上的泛化能力。研究表明，通过结合多种提示策略，如过程导向的方法和结果导向的方法，可以进一步提升思维链提示的效果<sup>0</sup>。\\n\\n总的来说，思维链提示方法通过模拟人类的思维过程，显著提升了人工智能模型在复杂任务中的表现，为未来的智能系统设计提供了新的思路。然而，如何设计更有效的提示语，以及如何在不同领域和任务中广泛应用思维链提示，仍然是当前研究的重要方向<sup>0</sup>。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的潜在影响\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示方法在人工智能领域具有深远的潜在影响。首先，它在提升模型推理能力方面表现出显著优势。通过引导模型逐步展开推理过程，思维链提示能够有效增强模型在复杂、多步骤任务中的表现<sup>0</sup>。例如，在多步推理任务中，迭代式提示框架能够逐步激发模型内部的相关知识，从而提高推理的准确性和可靠性<sup>0</sup>。其次，思维链提示方法为小型模型提供了新的可能性。通过符号化思维链蒸馏技术，小型模型也能够实现类似大型模型的逐步推理能力<sup>1</sup>。这不仅降低了计算资源的消耗，还使得高性能推理能力在更广泛的场景中得到应用<sup>1</sup>。此外，思维链提示方法在评估推理链的正确性和信息量方面也展现出潜力。ReCEval方法通过评估推理步骤的正确性和信息量，为思维链的质量提供了更为全面的评价标准<sup>2</sup>。然而，思维链提示方法也面临一些挑战，如对隐含知识的依赖和模型内部推理过程的不透明性<sup>2</sup>。尽管如此，其在教育、医疗、金融等多个领域的应用前景依然广阔。例如，在教育领域，思维链提示可以帮助学生更好地理解复杂概念；在医疗领域，它能够辅助医生进行诊断推理；在金融领域，则可用于风险评估和决策支持。总之，思维链提示方法不仅在技术上推动了人工智能的发展，还在实际应用中展现出巨大的潜力，值得我们进一步深入研究和探索。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '1', '2']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:19\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[1] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4', '[2] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的概述\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示方法（Chain of Thought Prompting）是近年来人工智能领域，特别是自然语言处理（NLP）中备受关注的一种技术。其核心思想是通过引导预训练语言模型（PLMs）生成一系列中间推理步骤，从而提高模型在复杂、多步推理任务中的表现。传统的PLMs虽然在知识储备方面表现出色，但在处理需要多步逻辑推理的任务时往往力不从心。思维链提示方法的出现，旨在弥补这一缺陷，使模型能够像人类一样进行逐步推理<sup>0</sup>。\\n\\n具体而言，思维链提示方法通过设计特定的提示（prompt），激发模型内部的推理能力。这些提示不仅包含任务相关的信息，还引导模型生成中间推理步骤，最终得出正确答案。例如，在数学问题解答或逻辑推理任务中，思维链提示可以帮助模型逐步解析问题，展示每一步的推理过程，从而提高解答的准确性和可解释性。\\n\\n近年来，研究者们提出了多种思维链提示策略。其中，迭代提示框架（Iterative Prompting Framework）是一种较为突出的方法。该方法通过逐步迭代的方式，动态合成与当前推理步骤相关的提示，从而有效捕捉不同推理步骤间的变化<sup>0</sup>。实验表明，这种迭代方案在多步推理任务中表现出显著的优势。\\n\\n此外，思维链提示方法不仅在单一领域内展现出强大的潜力，还在多领域自然语言理解任务中得到了广泛应用。例如，在文本到SQL解析、知识库问题生成等任务中，思维链提示方法均取得了显著的成效。然而，尽管取得了诸多进展，思维链提示方法仍面临一些挑战，如提示设计的复杂性、模型推理过程的可解释性等问题，这些都需要未来进一步的研究和探索。\\n\\n总的来说，思维链提示方法为提升PLMs在复杂推理任务中的表现提供了一种有效的途径，其研究不仅具有重要的理论意义，也具有广泛的应用前景。未来，随着研究的深入，思维链提示方法有望在更多领域发挥更大的作用。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的局限性\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"尽管思维链提示方法在自然语言理解任务中取得了显著的进展，但其局限性也不容忽视。首先，思维链提示方法依赖于高质量的提示设计，这往往需要大量的人工干预和专业知识<sup>3</sup>。这种依赖性限制了方法的可扩展性，并且可能导致潜在的错误或偏差。其次，现有的思维链提示方法在处理复杂的多步骤推理任务时可能遇到困难，因为它们可能无法捕捉到不同推理步骤之间的变化<sup>0</sup>。此外，思维链提示方法在处理低资源设置下的数学推理任务时仍存在挑战，因为它们可能无法有效地利用有限的训练数据<sup>4</sup>。最后，思维链提示方法在处理多模态数据时可能存在局限性，因为它们可能无法有效地整合来自不同模态的信息<sup>2</sup>。这些局限性表明，思维链提示方法仍需进一步研究和改进，以实现更广泛的应用和更高的性能。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '4', '2', '3']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[4] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6', '[2] Attention Prompting on Image for Large Vision-Language Models, ECCV, 2024, chunk 9', '[3] Let GPT Be a Math Tutor: Teaching Math Word Problem Solvers with Customized Exercise Generation., EMNLP, 2023, chunk 7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示在复杂低级控制任务中的应用\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"在复杂低级控制任务中，思维链提示方法的应用为解决多步骤推理问题提供了新的途径。例如，在机器人控制领域，通过思维链提示，可以引导模型逐步完成复杂的动作序列。研究表明，这种方法能够显著提高模型在执行复杂任务时的准确性和效率<sup>8</sup>。此外，思维链提示在游戏AI中也显示出其潜力，通过逐步推理，模型能够更好地理解游戏规则，并制定出更有效的策略<sup>9</sup>。然而，思维链提示在低级控制任务中的应用仍面临一些挑战，如如何设计有效的提示，以及如何处理模型在推理过程中可能出现的错误。未来的研究可以探索更先进的提示策略，以及结合外部知识库来提高模型的推理能力<sup>7</sup>。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['8', '9', '7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:21\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[8] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1', '[9] Use Your INSTINCT: INSTruction Optimization for LLMs Using Neural Bandits Coupled with Transformers, ICML, 2024, chunk 4', '[7] Large Language Models Are Zero-Shot Reasoners, NeurIPS, 2022, chunk 1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:23\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示的设计原则\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:23\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示的设计原则是构建有效思维链提示方法的核心，直接影响模型在复杂推理任务中的表现。首先，**语境敏感性**是设计思维链提示的重要原则之一。研究表明，动态合成与当前步骤语境相关的提示能够显著提升多步推理任务的性能<sup>0</sup>。这种语境敏感性使得模型能够更好地捕捉不同推理步骤间的变异性，从而提高推理的准确性和连贯性。\n",
      "\n",
      "其次，**逐步引导性**也是思维链提示设计的关键。通过逐步引导模型展开推理过程，可以模拟人类解决问题的“思维链”。例如，在数学推理任务中，采用“最少到最多”提示策略，将复杂问题分解为多个子问题，并依次解决这些子问题，从而逐步引导模型完成整个推理过程<sup>1</sup>。\n",
      "\n",
      "此外，**多样性和鲁棒性**也是设计思维链提示时需要考虑的因素。单一推理路径可能存在错误，因此引入多样化的提示和推理路径可以提高模型的鲁棒性。例如，通过采样生成多个推理路径，并选择最常见答案的方法，可以有效降低单一推理路径的错误率<sup>1</sup>。\n",
      "\n",
      "**可解释性**同样是思维链提示设计的重要原则。思维链提示不仅要提高模型的推理能力，还应提供可解释的推理过程，以便研究人员和用户理解模型的决策依据。例如，使用自然语言注释中间推理步骤，可以使模型的推理过程更加透明和可解释<sup>1</sup>。\n",
      "\n",
      "最后，**任务适应性**是思维链提示设计的另一个关键点。不同任务可能需要不同的提示策略，因此设计通用的思维链提示方法具有一定的挑战性。研究表明，针对特定任务定制提示策略，可以显著提升模型在该任务上的表现<sup>2</sup>。\n",
      "\n",
      "综上所述，思维链提示的设计原则包括语境敏感性、逐步引导性、多样性和鲁棒性、可解释性以及任务适应性。这些原则相互关联，共同构成了思维链提示方法设计的理论基础。未来的研究可以进一步探索这些原则在不同任务和场景中的应用，以提升思维链提示方法的普适性和有效性。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:23\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '1', '2']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:23\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6', '[2] Generated Knowledge Prompting for Commonsense Reasoning, ACL, 2022, chunk 5']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 基于教育理论的思维链提示策略\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"基于教育理论的思维链提示策略是近年来思维链提示方法研究中的一个新兴领域。这些策略借鉴了教育理论中的原则，旨在通过引导学习者的思维过程来提高学习效果。例如，Wang等人<sup>0</sup>提出了一种迭代提示框架，该框架逐步从预训练语言模型中提取相关知识，以解决多步推理任务。这种方法模拟了人类在面对复杂决策时如何发展“思维链”的过程。此外，Sun等人<sup>1</sup>探讨了如何通过方向性刺激提示来引导大型语言模型进行思维链推理。他们使用策略模型生成特定实例的触发提示，例如“让我们一步一步思考”，以优化不同样本的提示效果。这些研究表明，基于教育理论的思维链提示策略在提高语言模型的多步推理能力方面具有巨大潜力。然而，这些方法仍处于早期阶段，需要进一步的研究和探索，以更好地理解和应用教育理论中的原则。未来的研究方向可能包括探索更多教育理论的应用，以及开发更有效的提示策略，以提高语言模型的学习效果。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:24\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[1] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示的应用场景\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示方法作为一种新兴的提示技术，已经在多个领域展现出广泛的应用潜力。首先，在数学推理任务中，思维链提示被用于提升模型的复杂推理能力。例如，Wei等人提出的Few-shot-CoT方法通过手动构建推理链来引导模型进行多步推理<sup>1</sup>。此外，Zhou等人提出的Least-to-most CoT方法将复杂问题分解为多个子问题，逐步解决，从而提高推理质量<sup>1</sup>。这些方法在数学推理任务中取得了显著成效，展示了思维链提示在处理复杂逻辑推理问题上的优势。\n",
      "\n",
      "在自然语言理解（NLU）任务中，思维链提示同样发挥了重要作用。例如，Wang等人提出的Iteratively Prompt Pre-trained Language Models方法通过迭代提示框架，逐步从预训练语言模型中提取相关知识，以应对多步推理任务<sup>0</sup>。这种方法不仅提高了模型在复杂任务中的表现，还增强了模型对输入上下文的敏感度，使其能够更好地处理多样化的推理步骤。\n",
      "\n",
      "此外，思维链提示在文本到SQL解析任务中也展现出应用价值。通过构建详细的推理链，模型能够更准确地理解和转换自然语言查询为SQL语句，从而提高数据库查询的准确性和效率。Lu等人提出的PromptPG-CoT方法利用强化学习生成高质量的推理链，进一步提升了文本到SQL解析的性能<sup>1</sup>。\n",
      "\n",
      "在知识库问题生成领域，思维链提示方法也被广泛应用。通过引导模型生成详细的推理过程，可以生成更具逻辑性和连贯性的问题，从而提升知识库问答系统的质量。这些应用场景不仅验证了思维链提示方法的有效性，也揭示了其在不同领域的广泛应用前景。\n",
      "\n",
      "然而，尽管思维链提示方法在多个领域取得了显著进展，仍存在一些研究空白。例如，在低资源设置下的数学推理任务中，如何有效应用思维链提示方法仍是一个待解决的问题<sup>1</sup>。此外，多模态环境下的推理任务，如结合视觉信息的推理，也是未来研究的重要方向<sup>1</sup>。\n",
      "\n",
      "综上所述，思维链提示方法在数学推理、自然语言理解、文本到SQL解析和知识库问题生成等多个领域展现出强大的应用潜力。未来研究可以进一步探索其在低资源设置和多模态环境下的应用，以期实现更广泛的应用场景和更高的性能表现。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['1', '0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:26\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[1] A Survey of Deep Learning for Mathematical Reasoning, ACL, 2023, chunk 6', '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示在自然语言理解任务中的应用\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 对思维链提示方法未来研究的展望\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链提示方法在自然语言理解任务中的应用已经显示出巨大的潜力。例如，在文本到SQL解析任务中，思维链提示可以帮助大型语言模型（LLMs）通过逐步推理来提高解析精度<sup>0</sup>。这种方法通过将复杂问题分解为更简单的子问题，并指导LLMs逐步解决这些子问题，从而实现更准确的最终答案。此外，思维链提示在知识库问题生成中的应用也取得了显著成果<sup>1</sup>。通过在提示中明确描述中间推理步骤，LLMs能够生成更准确和更相关的问题。\n",
      "\n",
      "然而，思维链提示方法在自然语言理解任务中的应用也面临一些挑战。例如，当任务需要比示例问题更复杂的泛化能力时，思维链提示的性能可能会下降<sup>2</sup>。此外，思维链提示方法的效率也是一个需要考虑的问题，尤其是在处理大规模数据集时<sup>3</sup>。\n",
      "\n",
      "为了解决这些挑战，研究人员正在探索各种改进策略。例如，基于教育理论的思维链提示策略<sup>4</sup>，通过将教育理论中的原则应用于提示设计，可以提高LLMs的学习效果。此外，思维链微调方法<sup>5</sup>通过微调LLMs的参数来适应特定的自然语言理解任务，从而提高性能。\n",
      "\n",
      "未来，思维链提示方法在自然语言理解任务中的应用有望取得更大的突破。自动思维链提示推理方法<sup>6</sup>和基于大型语言模型信号的多模态思维链推理教学方法<sup>7</sup>等新兴技术，将为LLMs在自然语言理解任务中的推理能力提供新的可能性。通过不断改进和探索，思维链提示方法有望成为自然语言理解领域的重要工具，为人工智能的发展带来新的突破。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"对思维链提示方法未来研究的展望。尽管思维链提示方法在多步推理任务中取得了显著的进展，但仍然存在一些挑战和未来的研究方向。首先，思维链提示方法的性能很大程度上依赖于提示的设计，因此如何自动生成更有效的提示是一个重要的研究方向。例如，Wang et al. (2022b) 提出的自动思维链提示推理方法，通过自动构建和采样多样化的推理路径，可以提高思维链提示的性能。其次，思维链提示方法在处理复杂任务时可能会受到累积误差的影响，因此如何减少推理过程中的错误是一个关键问题。例如，Bai et al. (2022b) 和 Lyu et al. (2023) 利用模型的反馈和过去的错误来改进推理过程。此外，思维链提示方法在处理需要外部知识的任务时可能会受到限制，因此如何将外部知识融入到思维链提示中是一个值得探索的方向。例如，Wang et al. (2023a) 提出的知识链提示方法，通过使用证据三元组来限制生成不真实和不忠实答案的产生。最后，思维链提示方法在多领域自然语言理解任务中的应用还有待进一步研究。例如，Du et al. (2023) 和 Liang et al. (2023) 引入的多智能体辩论方法，可以提高生成内容的事实准确性，并减少谬误和幻觉。总的来说，思维链提示方法在多步推理任务中具有巨大的潜力，未来的研究可以进一步探索其性能和应用的改进。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['3', '0', '4', '1', '2', '6', '5', '7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: []\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6', '[0] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[4] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2', '[1] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[2] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0', '[6] Large Language Models Are Human-Level Prompt Engineers, ICLR, 2023, chunk 7', '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 6', '[7] Language Models are Multilingual Chain-of-Thought Reasoners, ICLR, 2023, chunk 1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: []\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示在文本到SQL解析中的应用\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "    \"content\": \"思维链提示在文本到SQL解析中的应用是当前自然语言处理领域的一个重要研究方向。文本到SQL解析任务旨在将自然语言问题转换为SQL查询，广泛应用于构建数据库的自然语言接口。传统的监督学习方法虽然有效，但需要大量标注数据，获取成本高昂。近年来，基于大型语言模型（LLMs）的上下文学习（in-context learning）成为了一种新的研究范式，能够在少量示例的情况下实现优异的性能<sup>0</sup>。然而，直接应用上下文学习在文本到SQL解析任务中仍存在性能不足的问题<sup>2</sup>。\n",
      "\n",
      "    研究表明，文本到SQL解析任务需要复杂的、多步骤的推理过程。即使是看似简单的问题，如“Kyle的ID是什么”，模型也需要将问题与数据库模式相关联，推断模式项之间的关系，并构建语法正确的SQL子句<sup>0</sup>。为此，思维链（Chain-of-Thought, CoT）提示方法被提出，并显示出在多步骤推理任务中的潜力<sup>1</sup>。\n",
      "\n",
      "    在文本到SQL解析中，思维链提示方法通过生成一系列中间推理步骤来引导LLMs进行复杂推理。例如，Wei等人<sup>1</sup>提出了一种基于思维链提示的方法，通过将问题解决过程与SQL查询的执行过程进行类比，生成自然语言描述的中间执行步骤。另一种方法是Zhou等人<sup>1</sup>提出的“最少到最多”（Least-to-Most）提示，该方法首先将原始问题分解为一系列子问题，然后逐步解决每个子问题，最终生成SQL查询。\n",
      "\n",
      "    然而，直接应用这些思维链提示方法存在一些问题，如错误传播和计算成本增加<sup>2</sup>。为此，研究者提出了新的改进方法。例如，问题分解提示（QDecomp）方法通过将原始复杂问题分解为更简单的子问题，逐步生成SQL查询，从而避免了详细的推理步骤导致的错误传播<sup>0</sup>。QDecomp方法的变体QDecomp<sup>+</sup>InterCOL通过包含每个子问题涉及的表和列名，进一步提高了模型的性能<sup>0</sup>。\n",
      "\n",
      "    实验结果表明，这些改进的思维链提示方法在Spider和Spider Realistic数据集上显著优于现有的提示方法，分别提高了2.4和1.5个绝对百分点<sup>0</sup>。这表明，通过合理的推理步骤设计和问题分解，思维链提示方法能够有效提升文本到SQL解析的性能。\n",
      "\n",
      "    尽管取得了显著进展，思维链提示在文本到SQL解析中的应用仍面临一些挑战。例如，如何平衡推理步骤的详细程度与错误传播的风险，以及如何进一步提高模型的泛化能力等问题仍需进一步研究<sup>2</sup>。未来研究可以探索更有效的提示策略，结合多模态信息，以及利用大型语言模型的信号来进一步提升思维链提示方法的性能<sup>0</sup>。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '1', '2']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:28\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 1', '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示在知识库问题生成中的应用\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链提示在知识库问题生成中的应用是一个新兴的研究领域，它利用大型语言模型（LLMs）的能力来生成高质量的问题。这种方法的核心思想是通过提供一系列的中间推理步骤来引导LLMs生成问题，这些步骤模拟了人类在解决问题时的思维过程。例如，在知识库问答（KBQA）任务中，思维链提示可以帮助LLMs更好地理解问题的语义，从而生成更准确的问题。最近的研究表明，思维链提示在KBQA任务中取得了显著的成果<sup>2</sup>。\n",
      "\n",
      "然而，思维链提示在知识库问题生成中的应用也面临一些挑战。首先，如何设计有效的提示来引导LLMs生成高质量的问题是当前研究的一个关键问题。其次，思维链提示的性能在很大程度上取决于LLMs的内部知识，因此如何有效地利用LLMs的知识是一个重要的研究方向。此外，思维链提示在处理复杂问题时的性能也需要进一步提高。\n",
      "\n",
      "为了解决这些挑战，研究人员提出了一些改进方法。例如，一些研究尝试通过引入外部知识源来增强LLMs的知识<sup>1</sup>。此外，一些研究探索了不同的思维链提示结构，以更好地引导LLMs生成问题<sup>3</sup>。这些方法都在一定程度上提高了思维链提示在知识库问题生成中的应用性能。\n",
      "\n",
      "总的来说，思维链提示在知识库问题生成中的应用是一个具有潜力的研究方向。随着LLMs技术的不断发展，思维链提示有望在知识库问题生成中发挥更大的作用。未来的研究可以进一步探索如何设计更有效的提示，如何更好地利用LLMs的知识，以及如何提高思维链提示在处理复杂问题时的性能。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['1', '2', '3']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:29\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[1] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6', '[2] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[3] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示在多领域自然语言理解任务中的应用\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链微调方法\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链提示方法在多领域自然语言理解任务中的应用正逐渐成为研究的热点。该方法通过引导大型语言模型（LLMs）生成一系列中间推理步骤，从而提高模型在复杂推理任务上的表现。例如，在文本到SQL解析任务中，思维链提示已被证明能够帮助LLMs逐步推导出正确的SQL查询语句<sup>1</sup>。此外，思维链提示在知识库问题生成中的应用也显示出其潜力，它能够帮助模型生成更加准确和多样化的答案<sup>2</sup>。\n",
      "\n",
      "在自然语言理解任务中，思维链提示的应用同样取得了显著成果。例如，在多步推理任务中，思维链提示能够显著提高LLMs的准确率，尤其是在数学问题解决和常识推理等任务上<sup>3</sup><sup>4</sup>。此外，思维链提示在复杂低级控制任务中的应用也显示出其潜力，它能够帮助模型更好地理解和执行复杂的指令<sup>5</sup>。\n",
      "\n",
      "然而，思维链提示方法也面临着一些挑战。例如，如何设计有效的提示模板，以及如何选择合适的推理路径等问题。为了解决这些问题，研究者们提出了多种改进策略，如基于复杂度的提示选择方法<sup>6</sup>，以及自动思维链提示推理方法<sup>7</sup>。这些方法都在一定程度上提高了思维链提示方法的性能。\n",
      "\n",
      "未来，思维链提示方法在多领域自然语言理解任务中的应用仍有很大的研究空间。例如，如何将思维链提示与其他技术（如知识图谱）相结合，以及如何进一步提高思维链提示方法的鲁棒性和泛化能力等问题，都是值得深入研究的方向<sup>8</sup>。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链微调方法（Fine-tune-CoT）是一种新兴的技术，旨在通过微调较小的语言模型（LMs）来提升其复杂推理能力。这种方法的核心思想是利用大型语言模型生成推理样本，然后通过微调将这些样本中的推理能力转移到较小的模型上。Fine-tune-CoT方法的一个关键优势是，它能够显著提高小型模型的推理能力，而无需依赖像GPT-3这样的大型模型<sup>0</sup>。\n",
      "\n",
      "  研究表明，思维链提示在大型语言模型上表现出色，但其在小型模型上的效果却有限。为了解决这个问题，Fine-tune-CoT方法应运而生。该方法首先利用大型语言模型生成一系列的推理样本，这些样本包含了从简单到复杂的各种推理步骤。然后，将这些样本用于微调小型语言模型，使其能够学习到复杂的推理模式<sup>2</sup>。\n",
      "\n",
      "  Fine-tune-CoT方法的有效性已经在多个复杂任务和不同规模的模型上得到了验证。实验结果表明，经过微调的小型模型在推理能力上有了显著的提升，甚至在某些任务上能够超越原来的大型模型。此外，这种方法还能够减少模型尺寸的需求，从而降低部署成本<sup>0</sup>。\n",
      "\n",
      "  尽管Fine-tune-CoT方法在提升小型模型的推理能力方面取得了显著的成果，但仍存在一些挑战。例如，如何有效地生成多样化的推理样本，以及如何优化微调过程以提高模型的泛化能力。未来的研究可以探索更有效的样本生成策略，以及结合其他技术如指令微调和自改进来进一步提升模型的性能<sup>2</sup>。\n",
      "\n",
      "  总的来说，思维链微调方法为小型语言模型的推理能力提升提供了一种有效的途径。随着研究的深入，该方法有望在更多领域得到应用，为人工智能的发展带来新的突破。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['3', '1', '4', '8', '2', '6', '5', '7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '2']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2', '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[4] Language Models are Multilingual Chain-of-Thought Reasoners, ICLR, 2023, chunk 1', '[8] Instruction Induction: From Few Examples to Natural Language Task   Descriptions, ACL, 2023, chunk 4', '[2] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 6', '[6] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0', '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 6', '[7] LLMs Can Find Mathematical Reasoning Mistakes by Pedagogical Chain-of-Thought, IJCAI, 2024, chunk 1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] Large Language Models Are Reasoning Teachers, ACL, 2023, chunk 0', '[2] Teaching Small Language Models to Reason., ACL, 2023, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 基于大型语言模型信号的多模态思维链推理教学方法\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"近年来，大型语言模型（LLMs）在自然语言处理领域取得了显著进展，特别是在多模态推理任务中展现出强大的能力。多模态推理要求模型能够跨越多种模态（如文本和图像）进行推理，以回答问题。然而，现有的多模态推理方法往往由于生成的推理链条质量不足而表现不佳。为了解决这个问题，研究者们提出了基于大型语言模型信号的多模态思维链推理教学方法。\n",
      "\n",
      "该方法的核心思想是利用大型语言模型生成的信号来指导多模态推理过程。具体而言，该方法首先利用大型语言模型生成多个推理链条和答案，然后通过投票机制选择最准确的推理链条和答案。这种投票机制不仅能够提高生成的推理链条的质量，还能够提高答案的准确性和鲁棒性。\n",
      "\n",
      "例如，Tan等人提出了一种名为MC-CoT的自一致性训练策略，该方法生成多个推理链条和答案，并通过投票机制选择最准确的结果。实验结果表明，该方法能够显著提高模型在多个基准数据集上的性能。值得注意的是，即使是较小的模型，在采用该方法后也能够达到与较大模型相当的性能，这表明该方法在利用推理链条提高多模态推理能力方面的潜力。\n",
      "\n",
      "此外，该方法还具有其他优点。例如，它能够提高模型的泛化能力，使其能够更好地处理未见过的任务。此外，它还能够提高模型的解释性，使人们能够更好地理解模型的推理过程。\n",
      "\n",
      "然而，该方法也存在一些挑战。例如，它需要大量的计算资源来生成多个推理链条和答案。此外，它还需要仔细设计投票机制，以确保其能够有效地选择最准确的推理链条和答案。\n",
      "\n",
      "总的来说，基于大型语言模型信号的多模态思维链推理教学方法是一种很有前景的方法，它能够有效地提高多模态推理任务的性能。未来，研究者们可以进一步探索该方法在其他领域的应用，并解决其面临的挑战。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: []\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:30\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: []\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示的后门攻击方法\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链提示方法在提高大型语言模型（LLMs）的推理能力方面取得了显著进展，尤其是在解决需要系统推理过程的任务时。然而，这种方法的广泛应用也带来了新的安全挑战，特别是后门攻击的威胁。后门攻击旨在通过在模型中植入恶意行为，使得模型在接收到特定触发条件时输出非预期的恶意内容。在思维链提示的背景下，攻击者可以通过在提示中插入精心设计的后门触发器来误导LLMs，使其在推理过程中产生错误的输出。\n",
      "\n",
      "  BadChain<sup>0</sup>是一种针对LLMs的思维链提示后门攻击方法，它通过在提示中插入后门触发器，并利用LLMs的推理能力，将恶意内容与触发器联系起来。这种方法不需要访问训练数据集或模型参数，因此可以针对商业LLMs进行攻击。BadChain通过在模型的推理步骤中插入一个后门推理步骤，从而在存在后门触发器的情况下改变最终输出。实验表明，BadChain在多个LLMs和复杂基准任务上均取得了高攻击成功率，例如在GPT-4上达到了97.0%的平均攻击成功率。\n",
      "\n",
      "  另一种后门攻击方法是NOTABLE<sup>1</sup>，它通过在编码器中直接将触发器与目标锚点绑定，从而实现可迁移的后门攻击。与将后门注入嵌入层或词嵌入向量的传统方法不同，NOTABLE利用了编码器中的注意力机制，在触发器和目标锚点之间建立直接的快捷连接。这使得攻击能够迁移到不同的基于提示的任务中，而不仅仅是特定任务。实验结果表明，NOTABLE在各种基准数据集上均取得了显著的攻击性能，攻击成功率超过90%。\n",
      "\n",
      "  这些研究表明，思维链提示方法虽然提高了LLMs的推理能力，但也引入了新的安全风险。因此，开发有效的防御措施来对抗这些后门攻击方法至关重要。未来的研究需要探索更强大的防御策略，以确保思维链提示方法的安全性和可靠性。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['0', '1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[0] BadChain: Backdoor Chain-of-Thought Prompting for Large Language Models, ICLR, 2024, chunk 0', '[1] NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models, ACL, 2023, chunk 1']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 研究背景与意义\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"随着深度学习技术的快速发展，预训练语言模型（PLMs）在自然语言处理领域取得了显著的成果。PLMs能够通过大规模语料库的预训练，学习到丰富的语言知识和模式，并在各种下游任务中展现出强大的泛化能力。然而，尽管PLMs在处理简单事实性知识方面表现出色，但在解决需要复杂推理和多步骤推理过程的问题时，其性能往往受到限制<sup>1</sup><sup>2</sup>。例如，当面对涉及多层次推理、逻辑推理或常识推理的复杂问题时，PLMs往往难以有效地回忆和利用其内部知识，导致推理错误或无法得出正确答案。\n",
      "\n",
      "思维链提示方法（Chain-of-Thought prompting）作为一种新兴的提示技术，旨在引导PLMs进行逐步推理，从而解决复杂问题。该方法通过在提示中明确描述中间推理步骤，帮助PLMs逐步构建推理链，最终得出正确答案。与传统的提示方法相比，思维链提示方法能够更好地捕捉复杂问题的推理过程，提高PLMs在复杂推理任务中的性能<sup>3</sup><sup>4</sup>。\n",
      "\n",
      "研究思维链提示方法具有重要的理论和实践意义。从理论角度来看，思维链提示方法有助于我们深入理解PLMs的推理机制，揭示其内部知识表示和推理过程的特点。从实践角度来看，思维链提示方法能够显著提高PLMs在复杂推理任务中的性能，为构建更强大、更智能的自然语言处理系统提供新的思路和方法。此外，思维链提示方法还可以应用于教育、知识库构建、人机交互等领域，具有重要的应用价值<sup>5</sup><sup>6</sup>。\n",
      "\n",
      "然而，思维链提示方法也面临着一些挑战和局限性。例如，如何设计有效的提示模板、如何评估推理链的质量、如何提高提示方法的鲁棒性和泛化能力等问题，都需要进一步研究和探索。未来，随着PLMs和提示技术的不断发展，思维链提示方法有望在更多领域发挥重要作用，为构建更智能、更可靠的自然语言处理系统提供新的动力<sup>7</sup><sup>8</sup>。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['3', '1', '4', '8', '2', '6', '5', '7']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:33\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[3] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1', '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[4] Towards an Understanding of Stepwise Inference in Transformers: A Synthetic Graph Navigation Model, ICML, 2024, chunk 7', '[8] Large Language Models Are Zero-Shot Reasoners, NeurIPS, 2022, chunk 1', '[2] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4', '[6] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6', '[5] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6', '[7] ReCEval: Evaluating Reasoning Chains Via Correctness and Informativeness, EMNLP, 2023, chunk 7']\u001B[0m\n",
      "Writing subsections:   5%|▍         | 1/22 [00:30<10:49, 30.93s/it]\u001B[32m2025-02-04 23:22:39\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 思维链提示方法的改进策略\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:39\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"思维链提示方法（Chain-of-Thought prompting, CoT）作为一种新兴的技术，已经在提升大型语言模型（LLMs）的推理能力方面展现出显著的效果。然而，现有的CoT方法仍然存在一些局限性，例如在处理复杂推理任务时可能出现的误差累积和推理路径单一等问题。为了解决这些问题，研究者们提出了多种改进策略。\n",
      "\n",
      "  首先是思维链微调方法（Thought Propagation, TP）<sup>1</sup>，它通过探索类似的问题来提供一个精细的解决方案或知识密集的计划，从而促进新问题的解决。TP与现有的提示方法兼容，显示出即插即用的泛化能力和增强效果，适用于各种任务，如最短路径规划、创意写作和LLM代理规划。\n",
      "\n",
      "  另一种改进策略是自动思维链提示推理方法（AutoCoT）<sup>2</sup>，它使用自动化的方式构建和采样多样化的推理路径，从而提高LLMs在复杂推理任务上的性能。此外，主动提示（Active-Prompt）<sup>2</sup>选择对模型输出不确定性最大的样本进行标记，进一步提升了CoT的性能。\n",
      "\n",
      "  自我一致性（Self-Consistency）<sup>4</sup>是另一种有效的改进策略，它通过从LLM的解码器中采样多样化的推理路径，并选择最一致的答案来提高推理的准确性。这种方法在算术和常识推理基准测试中取得了显著的准确性提升。\n",
      "\n",
      "  除了上述方法，还有基于大型语言模型信号的多模态思维链推理教学方法（Algorithm of Thoughts）<sup>5</sup>，它通过算法推理路径推动LLMs进行推理，开创了一种新的情境学习模式。这种方法只需要一个或几个查询就能扩展LLMs的想法探索，其性能优于早期的单查询方法，并与最近采用广泛树搜索算法的多查询策略相当。\n",
      "\n",
      "  这些改进策略不仅提高了CoT的性能，也为未来研究提供了新的方向。例如，探索如何将CoT应用于生成任务，以及如何将源任务的数量扩展到更多任务，都是值得进一步研究的领域。此外，使用降采样设置来探索CoTs中其他可能重要的因素，也是未来研究的一个有趣方向。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:39\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['1', '4', '2', '5']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:39\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[1] Thought Propagation: an Analogical Approach to Complex Reasoning with Large Language Models, ICLR, 2024, chunk 6', '[4] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0', '[2] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1', '[5] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m161\u001B[0m - \u001B[37m小节主题: 自动思维链提示推理方法\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m162\u001B[0m - \u001B[37m小节内容: \n",
      "{\n",
      "  \"content\": \"自动思维链提示推理方法是近年来思维链提示领域的一个重要进展。该方法旨在通过自动化的方式生成思维链提示，从而提高大型语言模型在复杂推理任务中的表现。与传统的手动编写思维链提示相比，自动思维链提示推理方法具有更高的效率和更广泛的应用范围。\n",
      "\n",
      "  早期的思维链提示方法主要依赖于人工编写的示例，如Wei等人提出的思维链提示（CoT）<sup>1</sup>。然而，这种方法存在两个主要缺点：一是对于复杂任务，人工编写的示例可能不足以覆盖所有必要的推理步骤，难以泛化到不同的任务；二是单个解码过程容易受到错误推理步骤的影响，导致最终答案错误。为了解决这些限制，最近的研究主要集中在两个方面：一是手工制作更复杂的示例，即基于过程的方法<sup>2</sup>；二是利用集成方法，即基于结果的方法<sup>3</sup>。\n",
      "\n",
      "  基于过程的方法旨在提高思维链推理的质量，特别是对于复杂的推理任务。例如，Zhou等人提出的从最少到最多的提示（Least-to-most CoT）<sup>2</sup>，通过将复杂问题分解为一系列子问题，然后依次解决这些子问题，从而促进给定子问题的解决。类似地，Khot等人利用不同的分解结构和不同的提示来回答每个子问题<sup>2</sup>。除了这些多步推理方法，Chen等人<sup>2</sup>和Gao等人<sup>2</sup>提出了程序思维（PoT），这是一种使用大型语言模型将推理过程表达为程序的方法。然后，将计算委托给外部计算机，执行生成的程序以得出答案。\n",
      "\n",
      "  基于结果的方法承认单个推理路径可能存在错误，因此使用多个推理路径<sup>3</sup>。自我一致性（Self-Consistency）<sup>3</sup>通过从语言模型中进行采样生成一组推理路径，并通过选择最常见的答案来边缘化这些推理路径。除了使用单个提示进行采样以产生多个推理路径外，Li等人<sup>3</sup>还提出通过“自我教学”引入不同的提示，作为产生更高程度多样性的补充解决方案。\n",
      "\n",
      "  自动思维链提示推理方法结合了上述两种方法的优点，通过自动化的方式生成多样化的思维链提示，从而提高大型语言模型在复杂推理任务中的表现。例如，Zhang等人提出的自动CoT（AutoCoT）<sup>4</sup>使用自动化的方式构建和采样多样化的示例。主动提示（Active-Prompt）<sup>4</sup>根据模型在输出中的不确定性选择最有帮助的样本进行标记。最近，Li和Qiu<sup>4</sup>采用了一种策略，将高置信度的思维存储为外部记忆，并在推理过程中检索这些见解以辅助推理过程。\n",
      "\n",
      "  总之，自动思维链提示推理方法是思维链提示领域的一个重要进展，它通过自动化的方式生成思维链提示，从而提高大型语言模型在复杂推理任务中的表现。未来，该方法有望进一步发展，为大型语言模型在更多领域的应用提供支持。\"\n",
      "}\n",
      "\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m171\u001B[0m - \u001B[37m小节内容引用编码: ['1', '4', '2', '3']\u001B[0m\n",
      "\u001B[32m2025-02-04 23:22:43\u001B[0m | \u001B[1mINFO    \u001B[0m | \u001B[36mfunc.writer.writer_func\u001B[0m:\u001B[36mwrite_subsections_content\u001B[0m:\u001B[36m185\u001B[0m - \u001B[37m引用内容: ['[1] Self-Consistency Improves Chain of Thought Reasoning in Language Models, ICLR, 2023, chunk 0', '[4] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1', '[2] Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution., ICML, 2024, chunk 2', '[3] Complexity-Based Prompting for Multi-Step Reasoning, ICLR, 2023, chunk 2']\u001B[0m\n",
      "Writing subsections: 100%|██████████| 22/22 [00:40<00:00,  1.85s/it]\n"
     ]
    }
   ],
   "source": [
    "# Step 5: generate the first draft of the literature review from the structure.\n",
    "log.info(f\"=========文献综述初稿生成===========\")\n",
    "sub_section_content = writer.write_subsections_content_parallel(structure_list, structure_md)\n",
    "# NOTE(review): the two calls below are executed in a later cell (In[11]);\n",
    "# kept here commented out.\n",
    "# lit_review.set_content(sub_section_content)\n",
    "# lit_review.content_to_LitReview()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyError",
     "evalue": "1",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyError\u001B[0m                                  Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[11], line 2\u001B[0m\n\u001B[0;32m      1\u001B[0m lit_review\u001B[38;5;241m.\u001B[39mset_content(sub_section_content)\n\u001B[1;32m----> 2\u001B[0m \u001B[43mlit_review\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcontent_to_LitReview\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32me:\\华宝新能VOC\\2. 竞赛\\大模型科研工具\\func\\writer\\literature_review.py:103\u001B[0m, in \u001B[0;36mLiteratureReview.content_to_LitReview\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m    101\u001B[0m content \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcontent[sub_section_title][\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mcontent\u001B[39m\u001B[38;5;124m'\u001B[39m]\n\u001B[0;32m    102\u001B[0m reference \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mcontent[sub_section_title][\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mreference\u001B[39m\u001B[38;5;124m'\u001B[39m]\n\u001B[1;32m--> 103\u001B[0m content, reference, current_reference_number \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mformat_content_reference\u001B[49m\u001B[43m(\u001B[49m\u001B[43mcontent\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mreference\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcurrent_reference_number\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    104\u001B[0m reference_list\u001B[38;5;241m.\u001B[39mappend(reference)\n\u001B[0;32m    105\u001B[0m \u001B[38;5;66;03m# 将内容中的空行合并为单个空行  \u001B[39;00m\n",
      "File \u001B[1;32me:\\华宝新能VOC\\2. 竞赛\\大模型科研工具\\func\\writer\\literature_review.py:133\u001B[0m, in \u001B[0;36mLiteratureReview.format_content_reference\u001B[1;34m(self, content, reference, current_reference_number)\u001B[0m\n\u001B[0;32m    131\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i, ref \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28menumerate\u001B[39m(reference):\n\u001B[0;32m    132\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mstr\u001B[39m(i) \u001B[38;5;129;01min\u001B[39;00m ref_mapping:\n\u001B[1;32m--> 133\u001B[0m         ref \u001B[38;5;241m=\u001B[39m ref\u001B[38;5;241m.\u001B[39mreplace(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m[\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mi\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m]\u001B[39m\u001B[38;5;124m'\u001B[39m, \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m[\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mref_mapping[i]\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m]\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m    134\u001B[0m         new_reference\u001B[38;5;241m.\u001B[39mappend(ref)\n\u001B[0;32m    136\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m content, new_reference, current_reference_number\n",
      "\u001B[1;31mKeyError\u001B[0m: 1"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this cell raised KeyError: 1 (traceback above). In\n",
    "# literature_review.py, format_content_reference indexes ref_mapping with the\n",
    "# int loop index i (`ref_mapping[i]`) while its keys are the strings produced\n",
    "# by re.findall — it should use the string citation number as the key.\n",
    "lit_review.set_content(sub_section_content)\n",
    "lit_review.content_to_LitReview()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pull one subsection's draft text and reference list to debug the citation\n",
    "# renumbering in format_content_reference.\n",
    "content = sub_section_content['文章结构安排']['content']\n",
    "reference = sub_section_content['文章结构安排']['reference']\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "def format_content_reference(content, reference, current_reference_number):\n",
    "    \"\"\"Renumber the citations of one subsection and its reference list.\n",
    "\n",
    "    Finds every <sup>N</sup> marker in `content`, assigns each unique old\n",
    "    number a new sequential number starting at `current_reference_number`,\n",
    "    rewrites the markers, and renumbers the leading [N] tag of each matching\n",
    "    entry in `reference` (entries that are never cited are dropped).\n",
    "\n",
    "    Returns (new_content, new_reference, next_reference_number).\n",
    "    \"\"\"\n",
    "    # Map old citation number (str) -> new number (int), in first-seen order.\n",
    "    ref_mapping = {}\n",
    "    for old_num in re.findall(r'<sup>(\\d+)</sup>', content):\n",
    "        if old_num not in ref_mapping:\n",
    "            ref_mapping[old_num] = current_reference_number\n",
    "            current_reference_number += 1\n",
    "\n",
    "    # Rewrite all markers in ONE pass. The previous chained str.replace\n",
    "    # calls cascade: with {'1': 4, ..., '4': 7} the 1->4 rewrite is later\n",
    "    # hit by 4->7, so an original <sup>1</sup> ends up as <sup>7</sup>\n",
    "    # (visible in the recorded In[32] output, where the text cites [7]\n",
    "    # while the reference list labels that entry [4]).\n",
    "    content = re.sub(r'<sup>(\\d+)</sup>',\n",
    "                     lambda m: f'<sup>{ref_mapping[m.group(1)]}</sup>',\n",
    "                     content)\n",
    "\n",
    "    # Renumber the leading [N] tag of each cited reference entry. re.match\n",
    "    # anchors at the start and guards against entries with no tag (the old\n",
    "    # re.findall(...)[0] raised IndexError on those).\n",
    "    new_reference = []\n",
    "    for value in reference:\n",
    "        match = re.match(r'\\[(\\d+)\\]', value)\n",
    "        if match and match.group(1) in ref_mapping:\n",
    "            old = match.group(1)\n",
    "            # Replace only the leading tag, not a later '[N]' in the title.\n",
    "            new_reference.append(value.replace(f'[{old}]', f'[{ref_mapping[old]}]', 1))\n",
    "\n",
    "    return content, new_reference, current_reference_number"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'1': 4, '2': 5, '3': 6, '4': 7}\n",
      "['[3] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6', '[1] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2', '[4] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0', '[2] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0']\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('本文旨在全面综述思维链提示方法的当前研究现状，结构安排如下：首先，在引言部分，我们将介绍研究背景与意义，概述思维链提示方法的基本概念，并详细说明本文的结构安排。接着，在第二部分，我们将深入探讨思维链提示方法的基本原理，包括其定义、工作原理、设计原则以及主要应用场景。随后，第三部分将重点介绍思维链提示方法的最新进展，涵盖基于教育理论的策略<sup>7</sup>、思维链微调方法<sup>5</sup>、在文本到SQL解析<sup>6</sup>、知识库问题生成<sup>7</sup>、自然语言理解任务、多领域自然语言理解任务、复杂低级控制任务、自动思维链提示推理方法、后门攻击方法以及基于大型语言模型信号的多模态思维链推理教学方法等方面的应用。第四部分将分析思维链提示方法面临的挑战与未来研究方向，探讨其局限性、改进策略以及潜在的未来研究方向。最后，在结论部分，我们将总结思维链提示方法的研究成果，探讨其潜在影响，并对未来研究进行展望。通过这种结构安排，我们希望能够为读者提供一个系统、全面的思维链提示方法研究现状的视角。',\n",
       " ['[6] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6',\n",
       "  '[4] Exploring Chain-of-Thought Style Prompting for Text-to-SQL, EMNLP, 2023, chunk 2',\n",
       "  '[7] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0',\n",
       "  '[5] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'],\n",
       " 8)"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Smoke-test the renumbering starting at 4. NOTE(review): the recorded output\n",
    "# shows a cascade bug — with mapping {'1': 4, '2': 5, '3': 6, '4': 7} the\n",
    "# sequential str.replace calls rewrite an original <sup>1</sup> to 4 and then\n",
    "# to 7, so the text cites [7] where the reference list says [4].\n",
    "format_content_reference(content, reference, 4)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{0: {'id': 0, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# Shepherd Pre-trained Language Models to Develop a Train of Thought: An Iterative Prompting Approach\\nBoshi Wang, Xiang Deng and Huan Sun The Ohio State University, Columbus, OH {wang.13930,deng.595,sun.397}@osu.edu\\n\\n# Abstract\\nWhile Pre-trained Language Models (PLMs) internalize a great amount of world knowledge, they have been shown incapable of recalling these knowledge to solve tasks requiring complex & multi-step inference procedures. Similar to how humans develop a “train of thought” for these tasks, how can we equip PLMs with such abilities? In this work, we explore an iterative prompting framework, a new prompting paradigm which progressively elicits relevant knowledge from PLMs for multi-step inference tasks. We identify key limitations of existing prompting methods, namely they are either restricted to queries with a single identifiable relation/predicate, or being agnostic to input contexts, which makes it difficult to capture variabilities across different inference steps. We propose an iterative context-aware prompter, which addresses these limitations by learning to dynamically synthesize prompts conditioned on the current step’s contexts. 
Experiments on three datasets involving multistep inference show the effectiveness of the iterative scheme and our proposed prompter design.', 'reference': '[0] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 0'}, 1: {'id': 1, 'title': 'Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models', 'content': '# Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models\\nBilgehan Sel 1 , Ahmad Al-Tawaha 1 , Vanshaj Khattar 1 , Lu Wang 2 , Ruoxi Jia 1 and Ming Jin 1 1 Virginia Tech 2 Microsoft\\n\\n# Abstract\\nCurrent literature, aiming to surpass the “Chain-of-Thought” approach, often resorts to an external modus operandi involving halting, modifying, and then resuming the generation process to boost Large Language Models’ (LLMs) reasoning capacities. This mode escalates the number of query requests, leading to increased costs, memory, and computational overheads. Addressing this, we propose the Algorithm of Thoughts —a novel strategy that propels LLMs through algorithmic reasoning pathways, pioneering a new mode of in-context learning. By employing algorithmic examples, we exploit the innate recurrence dynamics of LLMs, expanding their idea exploration with merely one or a few queries. Our technique outperforms earlier single-query methods and stands on par with a recent multi-query strategy that employs an extensive tree search algorithm. Intriguingly, our results suggest that instructing an LLM using an algorithm can lead to performance surpassing that of the algorithm itself, hinting at LLM’s inherent ability to weave its intuition into optimized searches. 
We probe into the underpinnings of our method’s efficacy and its nuances in application.', 'reference': '[1] Algorithm of Thoughts: Enhancing Exploration of Ideas in Large Language Models, ICML, 2024, chunk 0'}, 2: {'id': 2, 'title': 'Guiding Large Language Models Via Directional Stimulus Prompting.', 'content': \"# 3.3 Chain-of-Thought reasoning\\nWhile current methods primarily use general task-specific prompts, LLMs show sensitivity to them. Studies [ 69 ,26 ,79 ] demonstrate that LLMs can vary in performance based on the prompt used. As a result, much of the previous work has centered on manually [ 56 ] or automatically [ 61 ,79 ]crafting better prompts. However, these efforts mainly focus on task-specific prompts, which may not be optimal for every instance of a task. In our experiment, we employ our approach to generate instance-specific trigger prompts to elicit Chain-of-Thought (CoT) reasoning. Specifically, we train a policy model ( t5-base ) to generate instance-specific CoT trigger prompts, such as “ Let’s think step by step ”, to optimally prompt varying samples.  \\n\\nDataset and evaluation We adopted the experimental setup from previous work [ 26 ,79 ], where we tested zero-shot CoT reasoning abilities of InstructGPT ( text-davinci-002 ) with different trigger prompts. There are 600 examples in the MultiArith dataset [ 57 ], which we divided into 300/50/250 for training/validation/test set. As for the AQuA dataset [ 35 ], we use the standard test set with 254 samples, 300 samples from the standard training set for our training, and 100 samples for the standard validation set for our validation. We report the reasoning accuracy.  \\n\\nTable 2: Zero-shot chain of thoughts performance of InstructGPT ( text-davinci-002 ) with different prompts. ${}^{*}\\\\mathrm{Our}$ approach trains a policy model to generate instance-specific prompt triggers, which are compared to the task-specific prompts in [26, 79].   
\\n\\n\\n<html><body><table><tr><td>No.</td><td>Category</td><td>Chain-of-Thought Trigger Prompt</td><td>MultiArith</td><td>AQuA</td></tr><tr><td>1</td><td>Human-Designed</td><td>Let's thinkstepby step.</td><td>79.6</td><td>31.9</td></tr><tr><td>2</td><td></td><td>Weshouldthinkaboutthisstepbystep.</td><td>81.2</td><td>28.7</td></tr><tr><td>3</td><td></td><td>First,</td><td>78.0</td><td>38.2</td></tr><tr><td>4</td><td></td><td>Beforewediveintotheanswer,</td><td>54.8</td><td>27.2</td></tr><tr><td>5</td><td></td><td>Prooffollowedbytheanswer</td><td>58.4</td><td>37.8</td></tr><tr><td>6</td><td></td><td>Let'sthinkstepbystepinarealisticway.</td><td>59.6</td><td>33.9</td></tr><tr><td>7</td><td></td><td>Let's thinkstepby step usingcommon sense and knowledge.</td><td>80.0</td><td>34.3</td></tr><tr><td>8</td><td></td><td>Let'sthinklikeadetectivestepbystep.</td><td>73.6</td><td>24.0</td></tr><tr><td>9</td><td></td><td>Let'sthinkaboutthislogically.</td><td>75.2</td><td>34.7</td></tr><tr><td>10</td><td></td><td>Let'sthink stepby step.First,</td><td>78.8</td><td>32.3</td></tr><tr><td>11</td><td></td><td>Let'sthink</td><td>56.8</td><td>38.2</td></tr><tr><td>12</td><td></td><td>Let'ssolvethisproblembysplittingit intosteps.</td><td>72.4</td><td>33.2</td></tr><tr><td>13</td><td></td><td>Theansweris aftertheproof.</td><td>42.8</td><td>34.3</td></tr><tr><td>14</td><td></td><td>Let'sberealisticandthinkstepbystep.</td><td>69.6</td><td>29.9</td></tr><tr><td>15</td><td>APE [79]</td><td>Let's work this out in a stepby stepway to be surewehavetherightanswer</td><td>81.6</td><td>34.3</td></tr><tr><td>16</td><td>DSP w/ SFT</td><td>(*Generated instance-specific prompt)</td><td>75.2</td><td>35.8</td></tr><tr><td>17</td><td>DSPw/SFT+RL</td><td>(*Generated instance-specific prompt)</td><td>84.0</td><td>38.6</td></tr></table></body></html>  \\n\\nSupervised fine-tuning details For supervised fine-tuning (SFT), we first run inference on the training set with the 14 human-crafted prompts tested in [ 
26 ], respectively. We then selected those prompt and query pairs which resulted in a correct CoT reasoning outcome to form the training set for SFT. These query-prompt pairs were used to train a t5-base policy model for 2 epochs, with the model input being the query instance and the target output a trigger prompt.  \\n\\nRL training details After SFT, the prompts generated by the policy model were used to trigger InstructGPT for zero-shot CoT prompting. Reasoning accuracy was utilized as the reward for reinforcement learning (RL). A reward of 1 was assigned for correct reasoning results and 0 otherwise. We conducted 20 training iterations (106k episodes), with 5 epochs per batch, a batch size of 8, and a learning rate of 2e-6. The parameters for $\\\\mathrm{KL}_{\\\\mathrm{target}}$ and $\\\\beta_{0}$ were set to 0.5 and 0.001, respectively.  \\n\\nResults We compare the performance of using our generated instance-specific prompts with using the 14 human-crafted prompts which we used as the pseudo-stimulus to constitute the training set for SFT and also the prompt automatically discovered by the APE approach [ 79 ]. Note that all these 15 prompts are general task-specific and are used for the whole test set while ours are instance-specific. The performance comparison is shown in the Table 8. As can be seen, InstructGPT’s performance varies significantly when using different task-specific prompts. Compared to the 14 task-specific human-designed prompts, DSP enhances the performance with instance-specific prompts. It also outperforms the prompt discovered by the APE approach. Solely relying on supervised fine-tuning of the policy model with the dataset comprising the 14 human-designed prompts doesn’t lead to its peak performance. 
After fine-tuning with RL, the policy model is encouraged to explore better instance-specific trigger prompts, further improving performance.\\n\\n# 4 Related work\\nBlack-box large language models Recent years have witnessed the emergence of LLMs such as GPT-3 [ 6 ], Codex [ 9 ], InstructGPT, ChatGPT [ 46 ], PaLM [ 10 ], and LaMDA [ 66 ], which show significant promise in the field of NLP. These LLMs typically have a large number of parameters and require vast amounts of training data. Due to their scaling, these models have exhibited many emergent abilities, such as in-context learning, few-shot prompting, chain-of-thought prompting, and instruction following [ 6 ,46 ,69 ]. However, most LLMs are not open-sourced and can only be accessed via black-box APIs, through which the users send prompt queries and receive responses.  \\n\\nWhile there exist open-source LLMs such as OPT-175B [ 73 ] and Bloom [ 58 ], their local execution and fine-tuning require significant computational resources that may be infeasible for most researchers and users. However, despite their considerable performance on various tasks, LLMs often fall short of generating outputs that fully align with desired outputs on specific downstream tasks and use cases [ 16 ,42 ,18 ]. Our approach seeks to address this limitation by introducing directional stimulus generated by a small tunable LM into the prompt to provide more fine-grained guidance and control over black-box LLMs.  \\n\\nPrompt optimization and engineering Efficiently optimizing pre-trained LMs on downstream tasks by finding optimal prompts has been a focus of prior research. One approach involves tuning soft prompts, which are continuous embedding vectors that can be optimized using gradient descent methods [ 32 ,30 ,67 ,2 ,64 ]. However, the requirements of gradients and the challenge of passing gradients and continuous prompts through black-box APIs, making them less practical for the blackbox LLMs. 
Researchers have also tried to seek optimal prompts by designing task-specific natural language instructions and selecting proper training samples as in-context demonstrations in the prompt. These methods include manual engineering [ 50 ,6 ,56 ], editing [ 61 ,76 ], reinforcement learning [ 13 ,39 ], and automatic generation [ 79 ]. Despite these efforts, such prompts are not always effective at steering LLMs to generate desired outputs, especially for fine-grained instance-specific behaviors that are difficult to describe using task-specific instructions and demonstration examples. To address this limitation, our approach is able to provide more fine-grained instance-specific guidance generated by a small tunable policy model optimized with supervised fine-tuning and/or reinforcement learning.  \\n\\nControllable text generation The control of language models (LMs) has been extensively studied. Early approaches fine-tuned LMs on datasets containing desired attributes [ 17 ]. [ 24 ] proposed class-conditioned LMs, generating text with predefined control codes. However, direct LM training is costly. To address this, PPLM [ 12 ] trains an attribute model and passes gradients to control generation. GeDi [ 27 ] and DExperts [ 36 ] use class-conditional distributions as generative discriminators to guide generation, reducing computation complexity. These methods require either additional LM training or internal gradients and logistics, making them not applicable to black-box LLMs. Our approach proposes a solution to control black-box LLMs by inserting directional stimulus into the input query prompt and optimizing based on the return output.  \\n\\nReinforcement learning for NLP Reinforcement learning has been successfully applied to various NLP tasks, such as syntactic parsing [ 44 ,29 ], machine translation [ 71 ,28 ], summarization [ 48 ,62 ], conversational systems [ 31 ], etc. 
Language models define probability distributions over tokens in their vocabulary, and the text generation problem can be naturally formulated as selecting an action in an RL setting. Therefore, there have been extensive research efforts on optimizing LMs with RL, usually by aligning them with human preferences [ 80 ,70 ,40 ,62 ]. For example, the LLM InstructGPT [ 46 ] is optimized with RL to better follow users’ instructions and intent. In contrast with these works that directly update the LLMs to align with human preferences, our work optimizes a small policy model that generates text (stimulus) to guide LLMs to generate more human-preferred output instead of directly optimizing the LLMs, bypassing the inefficient LLM’s optimization.\", 'reference': '[2] Guiding Large Language Models Via Directional Stimulus Prompting., NeurIPS, 2023, chunk 6'}, 3: {'id': 3, 'title': 'Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step', 'content': '# 5 Related Work\\nChain-of-thought prompting. As an extension of few-shot prompting ( Brown et al. ,2020 ), chainof-thought has proven more generally applicable than algorithmic/structured reasoning for which intermediate step generation was initially studied, e.g., by Roy and Roth (2015 ); Ling et al. (2017 ); Chiang and Chen (2019 ); Nye et al. (2021 ). Recent studies seek to improve and analyze CoTs from different perspectives: Wang et al. (2022b )improves the original CoTs through marginalizing over diverse reasoning paths while Wang et al. (2022a ) marginalize over diverse prompts; Zelikman et al. (2022 ); Huang et al. (2022 ) improves CoT through a bootstrap manner of training on self-generated CoTs; Li et al. (2022b ) introduce voting classifiers to filter sampled CoTs before final prediction; Golovneva et al. (2022 ) introduce some automatic metrics for automatic assessment of chain-of-thoughts. This study instead focuses on enabling CoT for smaller models via distillation.  
\\n\\nLearning with explanations. Hase and Bansal (2022 ) discuss how explanations can serve as inputs (Talmor et al. ,2020 ), targets (Hendricks et al. ,2016 ;Fidler et al. ,2017 ;Camburu et al. ,2018 ;Zhou et al. ,2020 ;Narang et al. ,2020 ;Kayser et al. ,2021 ;Wiegreffe et al. ,2022 ), and priors (Zhang et al. ,2016 ;Srivastava et al. ,2018 ) for machine learning models. Chain-of-thought extends earlier efforts which treat explanations as intermediate structures, generated at inference time ( Rajani et al. ,2019 ). Most related to our work is Li et al. (2022a ), who do also learn with GPT-3 generated explanations; we show multiple samples improve significantly over their single-sample method, and also use chain-of-thought prompting at inference time vs. predicting explanations+labels via independent multitasking.  \\n\\nKnowledge distillation. Recent work, inspired by Knowledge Distillation ( Hinton et al. ,2015 ), has considered symbolic knowledge distillation, (West et al. ,2022 ), i.e., instead of distilling from soft representations like logits, large language model serve as training data generators ( Xiong et al. ,2019 ;Petroni et al. ,2019 ;Schick and Schütze ,2021 ;West et al. ,2022 ;Liu et al. ,2022 ;Meng et al. ,2022 ;Bhagavatula et al. ,2022 ); this paper continues this line of work.  \\n\\nContemporaneous work. There are several contemporaneous papers: Huang et al. (2022 ), Magister et al. (2022 ), and Ho et al. (2022 ) all show that smaller models can benefit from large models’ chains of thought. We contributes beyond these by: 1) showing that sampling a large number of chain-of-thoughts is paramount; 2) exploring transfer performance to challenge sets/unseen tasks; and 3) analysis that address what factors are important in the teacher corpus.\\n\\n# 6 Conclusion\\nWe demonstrate the effectiveness of Symbolic Chain-of-thought Distillation (SCoTD): a method that enables smaller language models to effectively use chain-of-thought-style reasoning. 
We demonstrate the method’s effectiveness across several downstream tasks, different student model sizes, different levels of supervision, and in difficult settings (challenge sets, unseen tasks). Our ablations shed light on what factors are particularly important to distill in these chain-of-thoughts.  \\n\\nOur concrete recommendations are: 1) sampling multiple and diverse CoTs for each input instance, and 2) performing self-consistency when the teacher CoTs are noisy. Several promising avenues for future work include:  \\n\\n1. Exploring SCoTD for generation tasks in addition to classification tasks;   \\n2. Scaling up the number of source tasks in $\\\\S\\\\ 3.5$ to generalize to more tasks;   \\n3. Using the down-sampling setup introduced in $\\\\S4$ to explore additional hypotheses about what other factors may be of importance in CoTs.\\n\\n# Limitations\\nSeveral limitations of our study include:  \\n\\n1. only English-language chain-of-thoughts/tasks considered;   \\n2. reliance on GPT-3, which is a closed-source product with an unknown training set (which could itself include some explanations); and   \\n3. focusing only on a single type of student model, OPT.  \\n\\nMore broadly, learning from and with explanations carries some specific risks related to automation bias. While a model might rationalize its predictions using a seemingly coherent string of natural language steps, even if it eventually gets the prediction correct, there’s no guarantee that the eventually predicted output actually results from a process represented by the rationalization. A user might assign excessive confidence to that system based on the chain-of-thought. We observed many cases where the chain of thought seemed promising only to result in models ultimately making incorrect predictions in the final few tokens. Caution should be taken when displaying chain-of-thoughts to users.\\n\\n# Acknowledgment\\nWe thank anonymous reviewers for their comments. 
This work is supported in part by the DARPA MCS program, NCSOFT NLP Center and a Sloan research fellowship.\n\n\n\n# A Crowdworking details\nA screenshot of the interface we use to collect the pairwise human judgments from $\\S3.1.1$ is given in Figure 8 . We conduct a post-hoc analysis using a javascript timer to ensure that annotators were paid at least $\\$15/\\mathrm{hr}$ : crowdworkers who didn’t meet this hourly rate during annotation were awarded bonuses post-hoc to ensure they were paid that rate. We select crowdworkers with IP addresses in US,CA,NZ,AU,GB.  \n\nIRB Information Crowdworking studies of standard NLP corpora (involving no personal disclosures) are not required by our IRB to be reviewed by them. While the authors of this work are not lawyers and this is not legal advice, this opinion is based on United States federal regulation 45 CFR 46, under which this study qualifies as exempt. We do not release crowdworker IDs, so annotations cannot be back-traced to individual workers.\n#', 'reference': '[3] Symbolic Chain-of-Thought Distillation: Small Models Can Also \"think\" Step-by-Step, ACL, 2023, chunk 4'}, 4: {'id': 4, 'title': 'Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution.', 'content': '# 2 RELATED WORK\nPrompting an LLM in the right way is essential to its downstream performance ( Moradi & Samwald ,2021 ;Madaan & Yazdanbakhsh ,2022 ;Zhou et al. ,2023 ). Indeed, even the order in which prompts are presented can heavily influence LLM performance ( Lu et al. ,2022 ). A number of recent works have focused on devising better prompt strategies, or even automating such prompt engineering.  \n\nPrompting : Chain-of-Thought Prompting (CoT, Wei et al. ,2022 ) is a popular prompt strategy which provides intermediate reasoning steps as few-shot prompts to an LLM, thereby significantly improving its arithmetic, commonsense, and symbolic reasoning abilities. 
Notably, the gains of CoT are more pronounced for stronger LLMs. This is intriguing, as it points to the possibility of increasingly capable (and potentially open-ended) self-improving mechanisms on top of adept LLMs—a hypothesis that Promptbreeder directly builds upon. Instead of few-shot CoT prompting, Kojima et al. (2022 ) demonstrate that LLMs can also be prompted zero-shot (e.g. \"Let’s think step by step\" ) to produce their own chains of thoughts (Zero-shot CoT) that improve reasoning abilities. Self-Consistency (CoT-SC, Wang et al. ,2022 ) extends CoT by sampling a diverse set of workings out and selecting the most consistent answer. Tree of Thoughts (ToT, Yao et al. ,2023 ) generalizes CoT to multiple workings out that can be expanded or backtracked from. Graph of Thoughts (GoT, Besta et al. ,2023 ) is a further generalization to arbitrary graph structures. Plan-and-Solve Prompting (PS, Wang et al. ,2023b ) encourages an LLM to first devise a plan to solve a problem before attempting to solve it. Similarly, Least-to-Most Prompting ( Zhou et al. ,2022 ) encourages an LLM to decompose a problem into subparts, and then to solve each part individually before synthesizing an answer. Self-Refine ( Madaan et al. ,2023 ) prompts an LLM to generate a response, to provide feedback on the response, and to finally refine the solution.  \\n\\nIn contrast to gradient-free approaches above, Soft Prompting approaches (e.g., Liu et al. ,2021 ;Qin & Eisner ,2021 ;Lester et al. ,2021 ) directly fine-tune continuous prompt representations. Huang et al. (2022 ) use CoT and CoT-SC on an unlabelled dataset of questions, and subsequently finetune an LLM based on generated solutions. Similarly, Zelikman et al. (2022 ) uses CoT to generate rationales and fine-tunes the LLM based on those examples and rationales that yielded the correct answer. However, as argued by Zhou et al. 
(2023 ), any approach that updates all or a portion of LLM parameters will not scale as models get bigger and, moreover, will not work with the increasing number of LLMs hidden behind an API.  \\n\\nAll of the prompt engineering approaches above are domain agnostic but hand designed. Central to our work is the hypothesis that we could do better by employing an automated self-improvement process that can adapt prompts to a domain at hand. Auto-CoT ( Zhang et al. ,2023b ) and AutomaticCoT ( Shum et al. ,2023 ) automatically find reasoning chains for Few-Shot CoT. Automatic Prompt Engineer (APE, Zhou et al. ,2023 ) uses one generator-prompt to generate prompt candidates, and another mutation-prompt to mutate them. In contrast to APE, our work performs compositional task-specific initialization of mutation-prompts, subsequent online mutation of mutation-prompts, uses special mutation operators that take into account the whole population and elite history, and uses diversity-maintenance methods—all of which help avoid the problem of diminishing returns and diversity loss suffered by APE.  \\n\\nConcurrently to our work, Yang et al. (2023a ) developed Optimization by PROmpting (OPRO), a prompt optimization method that varies prompts using a single complex mutation prompt, and evaluates newly generated prompts on a small fixed training set of problems. In contrast, Promptbreeder autonomously evolves multiple LLM generated mutation-prompts as well as task-prompts, and evaluates fitness on random subsets from the whole training set during evolution. At the time of its release, OPRO achieved a score of $80.2\\\\%$ via the optimized zero-shot prompt \"Take a deep breath and work on this problem step-by-step\" on GSM8K. Promptbreeder surpasses this with $83.9\\\\%$ in the zero-shot setting with the unintuitively simple prompt \"SOLUTION\"\" —further evidence for the sensitivity of LLMs to prompts and the importance on finding effective prompts automatically. 
Also concurrently to our work, Guo et al. (2023 ) developed EvoPrompt, which uses a fixed mutation (and crossover) prompt, as well as a prompt that asks for a mutant of the difference between two parent prompts, to produce offspring prompts. EvoPrompt is initialized with a whole population of initial hand-designed task tailored prompts rather than a single problem description as we do. In contrast to the two approaches above, Promptbreeder uses LLMs to self-referentially improve mutation-prompts, and it is able to evolve contexts as well.  \n\nSelf-Referential Self-Improvement : Developing an open-ended system that can improve itself as well as improving the way it is improving itself ( Schmidhuber ,1993 ;2003 ) is a long-standing open problem in AI research. Schmidhuber (1993 ) introduced an “introspective” neural network with a self-referential weight matrix that can modify its own weights and, thus, also modify those weights that are governing how its own weights are modified. Recently, Irie et al. (2022 ) proposed a more scalable self-referential weight matrix taking inspiration from fast weight programmers ( Schmidhuber ,1992 ). Kirsch & Schmidhuber (2022 ) propose a self-referential meta-learning approach, combining self-referential weight matrices with ideas from Gödel Machines ( Schmidhuber ,2003 ), i.e., to allocate more computational resources to better performing solutions. However, since these approaches directly modify parameters of a model, it is unclear how to scale them to the increasing number of parameters in modern LLMs. In contrast, for Promptbreeder the substrate of self-referential self-improvement is natural language, avoiding costly parameter updates altogether.  \n\nOpen-Endedness and LLMs : Promptbreeder makes use of the observation by Lehman et al. (2022 ), Meyerson et al. (2023 ) and Chen et al. (2023 ) that LLMs are effective at generating mutations from examples. 
In addition, LLMs encode human notions of interestingness and can be used to automatically quantify novelty ( Zhang et al. ,2023a ). Promptbreeder is related to Picbreeder ( Secretan et al. ,2008 ), an open-ended human-in-the-loop system that evolves increasingly interesting images. While Picbreeder explores the space of images, Promptbreeder explores the space of prompts and does so without humans in the loop. As Promptbreeder is proposing mutated prompts to itself, it is an example of a system transitioning from “learning from data” to “learning what data to learn from” ( Jiang et al. ,2022 ).', 'reference': '[4] Promptbreeder: Self-Referential Self-Improvement Via Prompt Evolution., ICML, 2024, chunk 2'}, 5: {'id': 5, 'title': 'Iteratively Prompt Pre-trained Language Models for Chain of Thought', 'content': '# 1 Introduction\\nHumans can develop a “train of thought” for complex decision making. For example, when asked the question ( Q) shown in Figure 1 , which involves composition, an important type of multi-step inference, humans apply two consecutive steps to derive the final answer: 1) find the “father” of the topic entity “Gwilym Lloyd George” ( E1 ); 2) find the “birthplace” of the entity returned in the first step (E2 ).  \\n\\nRecently, large-scale pre-trained language models (PLMs) have been shown capable of internalizing a great amount of simple factual knowledge such as E1 and E2 , yielding competitive performance on a range of knowledge-intensive tasks without resorting to any external knowledge source (Petroni et al. ,2019 ;Shin et al. ,2020 ;Zhong et al. ,2021 ;Roberts et al. ,2020 ;Lee et al. ,2020 ). However, work such as ( Talmor et al. ,2020a ;Kassner et al. ,2020 ;Rae et al. ,2021 ) reveals that PLMs face difficulties in complex, multi-step inferences. 
For example, they struggle with answering complex questions like Qwithout using external sources, no matter whether they are fine-tuned based on QA pairs or simply prompted to produce the answer (where even if they have memorized E1 and E2 ).  \\n\\n  \\nFigure 1: Our Iterative Prompting approach for deriving a “train of thoughts” with a PLM (on the right), compared with standard knowledge probing (on the left).  \\n\\nIn this paper, we study the following question: How to shepherd a PLM to recall a series of stored knowledge (e.g., E1 and E2 ) that is necessary for multi-step inference (e.g., answering Q), analogous to how humans develop a “train of thought” for complex decision making?  \\n\\nA direct way would be to fine-tune the PLM to generate the series of knowledge all at once (assuming such supervision is available), but soon one realizes the practical issue in this approach: PLMs which internalize a great amount of knowledge are inevitably large in scale, and fine-tuning all their parameters would become more and more costly as they keep scaling up. There’s also the potential concern that fine-tuning PLMs may interfere with their implicit knowledge storage, a phenomenon observed in ( Wang et al. ,2021 ) which is more generally related to the catastrophic forgetting problem of deep learning models ( McCloskey and Cohen ,1989 ;Kirkpatrick et al. ,2017 ). Therefore, lightweight methods such as prompting ( Liu et al. ,2021 ) which keep a PLM’s parameters intact would be more preferable for our purpose of eliciting knowledge. However, we find that no matter whether it is fine-tuned or prompted to generate the series of knowledge all at once, the PLM tends to lose its “train of thought” during the process, generating irrelevant facts or suffering from hallucination.  \\n\\nHence we explore an iterative prompting framework in this paper, which elicits knowledge from PLMs step by step for a given inference task. 
We have two desiderata in iterative prompting: (1) At different inference steps, the prompts need to focus on different components of the complex query. (2) The prompts should appropriately integrate knowledge gathered in previous steps into the current step; for instance, during the second step in the example in Figure 1 , the prompts need to combine the entity “David Lloyd George” (from knowledge recalled in the first step) with the unresolved part “What is the place of birth of ...” in the query.  \\n\\nA natural thought is to directly apply existing prompting methods in an iterative fashion. Unfortunately, their prompts are either restricted to queries with a single, identifiable relation/predicate (Jiang et al. ,2020 ;Petroni et al. ,2019 ;Zhong et al. ,2021 ;Shin et al. ,2020 ;Qin and Eisner ,2021 ), or being agnostic and insensitive to step-wise inputs (Lester et al. ,2021 ;Li and Liang ,2021 ;Brown et al. ,2020 ), and hence not ideal for our desiderata.  \\n\\nWe design a novel iterative prompting method towards that end. We augment a PLM with an iterative Context-Aware Prompter , a model which learns to dynamically synthesize prompts based on the current step context. At each step, the Prompter learns to process the query and all previously gathered evidence, and composes an appropriate prompt which steers the PLM to recall the next piece of knowledge. Like other prompting methods, all parameters of the PLM are kept fixed throughout the learning process. In addition, as the PLM size increases, the number of trainable parameters in our method scales comparably with or slower than previous prompting methods.  \\n\\nWe conduct experiments on three datasets involving multi-step inference, including two recent multi-hop Question Answering datasets: 2WikiMultiHopQA ( Ho et al. ,2020 ) and R4C ( Inoue et al. ,2020 ), and a scientific dataset ( Talmor et al. ,2020b ) for reasoning over taxonomic relations. 
For each compared method, we consider both iterative and non-iterative settings. Our experimental results show (1) effectiveness of the iterative scheme; (2) our proposed Context-Aware Prompter design outperforms existing prompting methods by notable margins; (3) quantitative and qualitative analysis which reveal the faithfulness of our learned prompter.', 'reference': '[5] Iteratively Prompt Pre-trained Language Models for Chain of Thought, EMNLP, 2022, chunk 1'}, 6: {'id': 6, 'title': 'Successive Prompting for Decomposing Complex Questions', 'content': '# 5 Related Work\\nPrompting methods Prompting was introduced as a way to test the reasoning capabilities of large language models ( Brown et al. ,2020 ). In followup works ( Schick ,2022 ;Chowdhery et al. ,2022 ;Marasovi´c et al. ,2021 ) prompting techniques have been used as a mechanism to supervise the model decision with few demonstrations as a conditioning context to guide its predictions on an unseen example. Works like Chain-of-Thought reasoning ( Wei et al. ,2022 ;Zelikman et al. ,2022 ) especially focus on compositional questions where they provide a chain of reasoning as demonstrations. In concurrent work, Least-to-Most prompting ( Zhou et al. ,2022 ) takes a similar view as ours to break down the problem into sub-problems. However, in Successive Prompting the question decomposition and answering stages are interleaved, unlike Least-toMost where the problem is first reduced into subproblem and then executed in a sequence. In our method, the next question prediction has access to previously answered sub-questions, which is useful in questions that need long chain referencing. Other contemporaneous works ( Press et al. ,2022 ;Khot et al. ,2022 ) use very large language models (more than twice the size we used) and show better few-shot generalization. Works like Perez et al. 
(2021 ) have shown the importance of having the right in-context examples for downstream performance leading to works that learn to retrieve relevant in-context examples ( Rubin et al. ,2021 ).  \\n\\nNon-symbolic methods Most non-symbolic methods are sequence-to-sequence models trained on a large amount of question answering data ( Khashabi et al. ,2020 ;Yoran et al. ,2021 ).  \\n\\nSymbolic methods Neural module networks like approaches parse complex questions into a prespecified grammar and learn neural components to handle symbolic mathematical operations ( Gupta et al. ,2020 ;Chen et al. ,2020 ;Nye et al. ,2021 )which are recursively executed. State-of-the-art models on DROP, however, use a combination of BERT-based contextual models along with a calculator that performs discrete operations ( Andor et al. ,2019 ;Segal et al. ,2020 ;Hu et al. ,2019 ). Works like Text Modular networks ( Khot et al. ,2021 ) and MRKL ( Karpas et al. ,2022 ) are closest to our work. However, they are limited in the terms of types of simple questions they can answer (single-span only) and the complexity of reasoning they can do (single-order only). TMNs, additionally, use a classifier that scores the generated chains module and filters out incorrect question decompositions, while we use contrastive estimation to learn a better question decomposer and as a result do not need a chain scorer.\\n\\n# 6 Conclusion\\nWe present a way to successively decompose complex questions into simple QA pairs, which allows for modular QD and QA systems that can be trained and queried independently. When performing in-context learning, we showed that successive prompting yields an improvement of 4.6 F1 over chain-of-thought prompting. When replacing just the in-context QA module with a fine-tuned one, which is adept at handling list type questions, we further improve the overall performance by 9.5 F1. 
We believe that modular systems that decompose and delegate tasks to the most appropriate model, whether that is a large LM or a tailored component, are more effective at solving complex tasks than trying to have a large LM solve the entire task on its own. Successive prompting shows one way this decomposition and delegation can be done.\n\n# Acknowledgements\nWe would like to thank Anthony Chen, Catarina Belem and the anonymous reviewers for the discussions and feedback. This material is based upon work sponsored in part by the DARPA MCS program under Contract No. N660011924033 with the United States Office Of Naval Research, in part by funding by AI2 and NSF IIS-1817183. We would also like to thank Hasso Plattner Institute (HPI) for supporting the first author through UCI-HPI fellowship. The views in this work are of authors and not the sponsors.\n\n# Limitations\nWe propose a way to decompose complex questions into interpretable simple QA pairs as latent steps that get successively asked and answered by large pretrained models. The notion of performing complex tasks by iteratively finding and then filling information needs is very general, but we have only shown the applicability of one specific version of this idea in one specific setting. There are many potential challenges in applying successive prompting more broadly. The biggest is that it requires at least some decomposition data, which may be hard or even impossible to obtain. Some complex questions are not easily decomposed, and some domains can be very challenging to write synthetic data generators for. We were able to generate synthetic data that covered most of the reasoning types in DROP, but other kinds of complex questions would not be covered by our generator (e.g., questions that require commonsense or causal reasoning).  \n\nThere is also significant difficulty in choosing a level of granularity for decomposition. 
If a large pretrained model can directly answer a question as complex as “What was Barth’s second field goal?”, we should let the model answer the question instead of trying to decompose it further. The right granularity for the decomposition thus depends on the capabilities of the underlying model, and those capabilities are rapidly changing as newer and larger pretrained models are released. There is the possibility that newer model iterations will not need any decomposition to answer the complex questions covered by our synthetic data generator, making that generator obsolete. However, it seems unlikely that pretrained models will be able to handle all complex scenarios in the near future, so the ideas of successive prompting and generating synthetic data to bridge reasoning gaps should still be applicable even when our particular application of them becomes obsolete.', 'reference': '[6] Successive Prompting for Decomposing Complex Questions, EMNLP, 2022, chunk 6'}, 7: {'id': 7, 'title': 'Get an A in Math: Progressive Rectification Prompting', 'content': '# Get an A in Math: Progressive Rectification Prompting\\nZhenyu $\\\\mathbf{W}\\\\mathbf{u}^{1}$ , Meng Jiang 2 , Chao Shen 1  \\n\\n1 School of Cyber Science and Engineering, Xi’an Jiaotong University 2 Department of Computer Science and Engineering, University of Notre Dame, Notre Dame, IN 46556 zhenyuwu $@$ stu.xjtu.edu.cn, mjiang2 $@$ nd.edu, chaoshen $@$ xjtu.edu.cn\\n\\n# Abstract\\nChain-of-Thought (CoT) prompting methods have enabled large language models (LLMs) to generate reasoning paths and solve math word problems (MWPs). However, they are sensitive to mistakes in the paths, as any mistake can result in an incorrect answer. We propose a novel method named Progressive Rectification Prompting (PRP) to improve average accuracy on eight MWP datasets from 77 .3 to 90 .5 . 
Given an initial answer from CoT, PRP iterates a verify-then-rectify process to progressively identify incorrect answers and rectify the reasoning paths. With the most likely correct answer, the LLM predicts a masked numerical value in the question; if the prediction does not match the masked value, the answer is likely incorrect. Then the LLM is prompted to regenerate the reasoning path hinted with a set of incorrect answers to prevent itself from repeating previous mistakes. PRP achieves the best performance compared against the CoT methods. Our implementation is made publicly available at https://wzy6642.github.io/prp.github.io/.  \\n\\nFirst, to distinguish correct and incorrect answers, existing methods repeatedly solve a problem and use a majority vote strategy to determine the most consistent answer as the correct answer. This is known as self-consistency (Wang et al. 2023b). However, since it solves the same problem multiple times, this repeated independent process leads to same mistakes, making the frequent answer still incorrect. Second, existing methods such as progressive-hint prompting (Zheng et al. 2023) modify reasoning paths by adding “(Hint: The answer is near $[\\\\mathsf{H}]^{\\\\,,}$ after the given problem, where [H]is the slot of previous answers. It is evident that when previous answers are incorrect, LLMs may still generate an incorrect answer in response to the hint. Third, existing CoT prompting methods exhibit high sensitivity to mistakes in intermediate reasoning steps (Kojima et al. 2022; Chen et al. 2022; Wang et al. 2023a; Shi et al. 2023). Even a tiny mistake in the reasoning process could alter the entire problem-solving process, resulting in an incorrect answer. 
It is nontrivial to achieve multi-step precise reasoning.', 'reference': '[7] Get an A in Math: Progressive Rectification Prompting, AAAI, 2024, chunk 0'}, 8: {'id': 8, 'title': 'Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling', 'content': '# 5 Related Work\\nIn-Context Learning is an emergent ability of LLMs as they scale up in model sizes and training data, where an LLMs can learn to perform a task from a few examples in the context (which is also referred to as few-shot prompting) [Brown et al., 2020]. It has been shown to achieve promising few-shot and even zero-shot performance on various natural language processing [Brown et al., 2020, Schick and Schütze, 2020, Perez et al., 2021] and program synthesis [Austin et al., 2021] tasks.  \\n\\nReasoning via Chain-of-Thought Prompting Chain-of-Thought (CoT) prompting is a technique that enables LLMs to perform complex reasoning tasks by prompting them with a few examples with step-by-step solutions [Wei et al., 2022, Suzgun et al., 2022]. CoT prompting has been shown to improve performance on various reasoning tasks, such as arithmetic reasoning [Wei et al., 2022, Zhou et al., 2022], symbolic reasoning [Wei et al., 2022, Zhou et al., 2022], multi-hop question answering [Press et al., 2022, Arora et al., 2022], and natural language inference [Wang et al., 2022b]. However, designing effective CoT prompts requires human experts with an understanding of both the task and the prompting technique [Zamfirescu-Pereira et al., 2023], which limits the scalability and generalizability of CoT prompting.  \\n\\nSeveral works have attempted to automate the process of CoT prompt discovery. Zhang et al. [2022] proposed to use LLMs to generate CoT solutions for diverse training questions in zero-shot and integrate the generated CoT solutions in the prompt for solving test questions. 
This method can be seen as a special version of our Reprompting algorithm with $M=0$ iterations, while our experiments demonstrate that Reprompting improves the CoT solutions on the training examples through iterations. Deng et al. [2022], Zhang et al. [2023] proposed to train an additional policy model to find the best prompt through reinforcement learning, but their approaches are limited to prompt optimization within a relatively small search space (i.e. it is restricted to the prompts that are either extremely short or within a small edit distance from an initial prompt). Zhou et al. [2023] proposed a method for automatically generating, scoring and selecting effective instruction messages $m$ for zero-shot chain-of-thought reasoning, which is orthogonal and can be potentially combined with our algorithm. Paranjape et al. [2023] introduced a framework that automatically retrieves demonstrations of related tasks from a task library and generates CoT solutions for the new task. However, this framework still requires collective human efforts to write demonstrations for a diverse set of tasks in the task library. In contrast, our Reprompting algorithm enables LLMs to solve complex reasoning tasks without any human guidance. Additionally, Yoran et al. [2023] proposed a multi-chain reasoning (MCR) method that prompts LLMs to combine pieces of information from multiple chains of thought to predict the final answer, which differs from our method in two ways: first, MCR combines multiple CoT solutions to the same question at test time, while Reprompting combines CoT solutions generated for different training questions before testing; second, MCR combines solutions only once, whereas Reprompting iteratively samples new solutions and recombines them. 
As a result, Reprompting generates effective CoT recipes from only a few training examples, resulting in improved test performance without slowing down test inference.\n\n# 6 Conclusion\nWe introduce Reprompting , an automated prompt inference algorithm which, without human effort, discovers effective chain-of-thought (CoT) prompts for each task given a few question-answer pairs. On five Big-Bench Hard (BBH) tasks, prompts discovered with Reprompting consistently outperform zero-shot, few-shot, and human-written CoT prompts. Furthermore, performance of a weak LLM is significantly aided by generating the initial CoT solutions using a stronger LLM and then reprompting using the weak LLM to optimize the prompt for itself. Overall, Reprompting achieves up to $+17$ point improvements over the previous state-of-the-art on BBH tasks, which was based on human-written prompts. Our results suggest that LLM comparisons can be highly sensitive to CoT selection, further emphasizing the need for automatic prompt discovery and optimization using algorithms such as Reprompting .\n\n\n\n# A Compute and Resources\nWe use the OpenAI APIs for all our experiments. The average cost of running Reprompting per task (using either the Gibbs sampling or the greedy version) is around $\\$80$ (in US dollars) for gpt-3.5-turbo and $\\$800$ for text-davinci-003 based on the standard pricing.\n\n# B Additional Illustrations\nOn sensitivity to initialization We have shown that Reprompting can be sensitive to initial zero-shot recipe generation. In each task we tested, armed with a suitable prompt InstructGPT could reach test set accuracy equalling or besting ChatGPT. However, such a prompt could not be discovered if the prompt recombination and evolution through Reprompting was started with initial prompts generated by InstructGPT itself. Fig. 
B.1 points to a likely explanation: ChatGPT can generate a wider range of useful recipes, and whether these initial recipes lead to the correct solution or not, InstructGPT can follow them and, through Reprompting , refine them and correct them. Thus, as we have shown in our experiments, with good initialization, LLMs that may appear inferior based on their zero-shot performance may end up performing just as well or better than LLMs whose zero-shot performance is more encouraging. It would be interesting to see if Reprompting can use other LLMs in initialization to perform even better, or if the humans can be put back into the loop to provide some initial recipes, or some generic instructions on how to generate them.  \\n\\nOn transferability of discovered recipes The fact that $L L M_{1}$ (ChatGPT) can point $L L M_{2}$ (InstructGPT) in the right direction(s) for prompt discovery does not mean that the discovered prompts, having been optimized for training performance on $L L M_{2}$ will perform well when used to prompt $L L M_{1}$ .In fact, Table 2 in the main text indicates that the discovered CoT recipes that work for one model may not necessarily work for other models. For example, in the case of Temporal Sequences , the test performance is achieved with a prompt trained with InstructGPT (after initialization with ChatGPT as $L L M_{1}$ ). But when using that prompt in testing with ChatGPT the performance is by $18\\\\%$ lower. Figure B.2) illustrates the solution strategy that emerged from training: The time intervals that need to be reasoned over are sorted, and among the sorted list, the missing interval was inserted as the possible interval when the person in question could have performed an activity. Then the answer is generated. InstructGPT follows this procedure with accuracy over $99\\\\%$ , but ChatGPT sometimes skips the generation of that crucial line (for this recipe) with the missing interval within the timeline.  
\\n\\n  \\nFigure B.2: An example on Temporal Sequences where ChatGPT underperforms InstructGPT using the same CoT prompt optimized for InstructGPT via Reprompting (using ChatGPT+InstructGPT). ChatGPT fails to correctly execute the recipe as it skips a key step (the blue underlined text from InstructGPT) to reach the final answer. (The illustration does not show the full prompt that precedes the puzzle $x$ for brevity; it consists of 5 training examples with worked-out solutions that all follow the same strategy of solving these types of problems.)  \\n\\nTherefore, among initial “ideas” from ChatGPT, some can be refined to work well for InstructGPT, and others can be refined to work well for ChatGPT itself, as the best performance of ChatGPT (using the CoT prompt optimized for itself) is only slightly lower than that of the ChatGPT+InstructGPT combination.  \\n\\nThese results suggest that fair comparison between different LLMs may be difficult, as one needs to optimize the CoT prompt for each model, and that optimization is typically non-trivial.  \\n\\nHow do the model-generated CoT recipes differ from human-written ones? In the main text, We evaluated the performance of the CoT recipes generated using Reprompting and contrasted it with human-written ones in Suzgun et al. [2022]. As illustrated by the example recipes in Figure B.3, the model-generated CoT recipes share some similarities to human-written ones on some tasks (such as Logical Deduction ), but differs on other tasks. For instance, on Object Counting , the CoT generated using Reprompting computes the total number of objects by incrementing the count one by one (e.g. adding 4 to the count 5 by “ [6 ,7 ,8 ,9 ]”), while in the human written recipe, it computes the addition through an arithmetic formula at the end. 
On the task of Penguins in a Table , the automatically generated CoT inserts a piece of code to solve the task, whereas the CoT crafted by humans employs natural language to reason over the table. The full prompts that yield the best performance on each task are provided in the supplementary material.  \\n\\n  \\nFigure B.3: Examples of the best-performing CoT recipes inferred via Reprompting on Logical Deduction (best score: 66.3), Geometric Shapes (best score: 73.2), Object Counting (best score: 99.6), Penguins in a Table (best score: 81.5), Temporal Sequences (best score: 99.2), and Causal Judgement (best score: 68.4).', 'reference': '[8] Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling, ICML, 2024, chunk 3'}, 9: {'id': 9, 'title': 'Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication', 'content': '# 2 Related Work\\n\\n# 2.1 Chain-of-Thought prompting in LLMs\\nWei et al. (2022b ) highlight that LLMs can manifest enhanced reasoning capabilities when being prompted by demonstrations with intermediate reasoning steps. This technique can effectively improve the performance of LLMs on complex reasoning tasks ( Wei et al. ,2022a ;Kojima et al. ,2022 ). A series of strategies for enhancing CoT has been proposed to further improve the performance of LLMs. One such method is program-aided language models ( Gao et al. ,2022 ;Chen et al. ,2022 ), which aims to decouple reasoning and computation through program synthesis. Moreover, complex tasks can also be transformed into delegable sub-tasks through modular approaches ( Khot et al. ,2023 ). Choosing appropriate demonstrations can also enhance the performance of CoT ( Li et al. ,$2023\\\\mathbf{a}$ ;Li and Qiu ,2023a ). Notable among these, AutoCoT ( Zhang et al. ,2023b ) uses an automated way to construct and sample diverse demonstrations. Active-Prompt ( Diao et al. 
,2023 ) selects the most helpful samples for labeling based on the model’s uncertainty in the outputs. Recently, Li and Qiu (2023b ) employ a strategy of storing high-confidence thoughts as external memory and retrieves these insights to aid the reasoning process.\\n\\n# 2.2 Ensemble of Reasoning Paths\\nLLMs have the ability to explore multiple reasoning paths using techniques such as temperature adjustment and prompt sampling ( Chu et al. ,2023 ). Wang et al. (2023c ) suggest that for complex questions, there may be several correct paths to approach a problem, leading to the proposal of Self-Consistency. This method replaces the greedy decoding strategy with the sampling of multiple reasoning paths and selecting the most consistent answer, resulting in significant performance improvements. Beyond that, Fu et al. (2023b ) discover that prompts with higher reasoning complexity could achieve better performance in multi-step reasoning tasks, leading to the proposal of complexitybased prompting. While other methods, such as re-ranking ( Cobbe et al. ,2021 ;Thoppilan et al. ,2022 ), have also been applied to select suitable reasoning paths, they often rely on heuristic or trained smaller models. Recently, Li et al. (2023b ) sample different demonstrations and use step-by-step verification to filter out incorrect answers. However, obtaining step-level labels can be challenging, and using smaller models for judgment struggles to handle complex reasoning processes. In contrast, our method fully utilizes the communication and decision-making capabilities of LLMs to reach the final answer, without the need for additional training and annotated data.\\n\\n# 2.3 Reasoning Path Refinement\\nAlthough CoT ( Wei et al. ,2022b ) effectively enhances the performance of LLMs in complex reasoning tasks, they remain susceptible to errors during the reasoning process, leading to incorrect answers ( Bai et al. ,2022b ;Lyu et al. ,2023 ). 
To mitigate this issue, starting from the model’s own thoughts, Shinn et al. (2023 ) and Madaan et al. (2023 ) employ the model’s own feedbacks and past mistakes to refine the reasoning process. Yao et al. (2023 ) explore the synergies between reasoning chains and action plans. For numerical problems, Zheng et al. (2023 ) gradually guide models to the correct answer by using previously generated answers as hints. With the aid of external knowledge, Wang et al. (2023a ) introduce chain-of-knowledge prompting that employs evidence triples to curb the generation of unfactual and unfaithful answers. Taking model interactions into account, multi-agent debates ( Du et al. ,2023 ;Liang et al. ,2023 ) have been introduced to enhance the factual accuracy of generated content and reduce fallacies and hallucinations. EoT differs from these efforts as we prioritize enhancing the current reasoning process generated by a single model by incorporating the reasoning processes from other models as external insights through cross-model communication.\\n\\n# 3 Preliminary\\nFirstly, we define the current methods that use LLMs to solve problems. We denote a LLM with a parameter size of length as $t$ , which includes tokens $\\\\theta$ as $p_{\\\\theta}$ , and the sequence $\\\\left[{{s}_{1}},{{s}_{2}},\\\\ldots,{{s}_{t}}\\\\right]$ .The LLM predicts the next token based on the prior tokens in the sequence. The probability of the probability of the whole sentence is $s_{i}$ $p_{\\\\theta}(s_{i}|s_{1},s_{2},\\\\ldots,s_{i-1})$ . T $p_{\\\\theta}(s)\\\\,=$ ()$\\\\begin{array}{r}{\\\\prod_{i=1}^{t}p_{\\\\theta}(s_{i}|s_{\\\\le i-1})}\\\\end{array}$ .  \\n\\nStandard prompting. Standard prompting involves deriving an answer $a$ from a question $q$ using $p_{\\\\theta}(a|q)$ . In-Con et al. ,2020 )aims to improve LLMs performance by adding demonstrations $D=\\\\{d_{1},d_{2},\\\\ldots,d_{n}\\\\}$ {to the input, which can be expressed as $p_{\\\\theta}(a|D,q)$ .  \\n\\nCoT prompting. 
As identified by Wei et al. (2022b ), the incorporation of intermediate reasoning steps can improve the proficiency of LLMs in tackling complex reasoning challenges. To facilitate this, a rationale $r_{i}$ is added to demonstration $d_{i}\\\\,=\\\\,\\\\{q_{i},r_{i},a_{i}\\\\}$ to guide e LLMs in explicitly generating reasoning steps. Fu et al. (2023b ) observe that using rationale $r_{i}$ with more complex reasoning steps for demonstrations can further enhance the model’s reasoning performance.  \\n\\nSelf-Consistency. Self-Consistency method, introduced by Wang et al. (2023c ), effectively consolidates answers from multiple independent reasoning chains. This technique prioritizes the most commonly occurring answer, defined as $a=\\\\operatorname{argmax}_{a_{i}}f(a_{i})$ , w re $f(a_{i})$ denotes the frequency of each answer $a_{i}$ . This approach enables the model to explore a broader range of reasoning pathways, thereby enhancing its reasoning ability. However, it remains constrained by the intrinsic limitations of LLMs’ capabilities.  \\n\\n  \\nFigure 3: Correspondence between communication paradigms and network topologies. The top row depicts four network topologies. The second row correlates these with the corresponding communication paradigms. The bottom row offers an analysis of the communication volume associated with each paradigm. The horizontal axis represents the information that the node can receive, while the vertical axis indicates the information that the node can send.  \\n\\nProgressive-Hint Prompting. Introduced by Zheng et al. (2023 ), Progressive-Hint Prompting (PHP) leverages a sequence of historical answers $\\\\{a^{(1)},a^{(2)},\\\\bar{\\\\dots},a^{(j-1)}\\\\}$ soning process the subsequent answer $r^{(j)}$ and facilitate the derivation of a $a^{(j)}$ ().', 'reference': '[9] Exchange-of-Thought: Enhancing Large Language Model Capabilities Through Cross-Model Communication, EMNLP, 2023, chunk 1'}}\n"
     ]
    }
   ],
   "source": [
    "# Retrieve the reference content (titles/abstracts/chunks) for the current topic\n",
    "# and dump it for inspection.\n",
    "# NOTE(review): `print` on this large nested dict floods the cell output (see the\n",
    "# stored output above); consider slicing it or ending the cell with a bare\n",
    "# expression for rich display instead — TODO confirm intended usage.\n",
    "response = writer.get_reference_content(topic)\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_values([])"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: the `.values()` view of an empty dict renders as dict_values([]).\n",
    "# NOTE(review): scratch cell — it also rebinds `response` from the previous cell.\n",
    "response = dict()\n",
    "response.values()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 6: save the literature-review draft to the local directory.\n",
    "# `LitReview_to_md` presumably writes a markdown file under \"lite_review/\" —\n",
    "# TODO confirm the directory exists / is created by the method.\n",
    "# NOTE(review): the f-string below has no placeholders; the `f` prefix is unnecessary.\n",
    "log.info(f\"=========文献综述初稿保存===========\")\n",
    "lit_review.LitReview_to_md(\"lite_review/\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
