# -*- coding: utf-8 -*-
import asyncio
import copy
import re
import aiofiles as aiofiles
from globalvariables import *
from Agents.agent import Agent
from treelib import Tree
import json
import time
from utils import *
from outline import *
import random
from collections import deque
from outline import Outline


class GenerationAgent(Agent):
    def __init__(self, token):
        # Thin constructor: all setup is delegated to the Agent base class,
        # which receives the API token used for chat calls.
        super().__init__(token)

    async def GenerateQueries(self, topic, survey_lang):
        """Produce search inputs for a topic: the topic itself, a translation
        of it into the other supported language, and generated keywords.

        :param topic: the survey topic string.
        :param survey_lang: "English" or "Chinese"; the topic is translated
            into whichever of the two the survey is NOT written in.
        :return: tuple (topic, translated_topic, keywords).
        """

        async def _translate(text, target_lang):
            # Ask the model to translate the topic into the target language.
            prompt = translate_prompts[target_lang] % text
            return await self.chat_async(user_input=prompt,
                                         response_format_type="text",
                                         language=target_lang,
                                         do_sample=False)

        async def _keywords(text):
            # Ask the model for keyword expressions (JSON) and split them.
            prompt = generate_expressions_prompt % text
            raw = await self.chat_async(user_input=prompt,
                                        response_format_type="json_object",
                                        language="English",
                                        do_sample=False)
            parsed = try_parse_json_object(raw)[1]
            return extract_and_split_keywords(parsed)

        target_lang = {"English": "Chinese", "Chinese": "English"}[survey_lang]
        # Run translation and keyword generation concurrently.
        topic_translated, keywords = await asyncio.gather(
            _translate(topic, target_lang), _keywords(topic))
        return topic, topic_translated, keywords

    async def GenerateOutlinesText(self, chunks_keyinfo, survey_topic, survey_lang, survey_type):
        """Generate one or more preliminary outlines from chunk key info.

        Key info is grouped by entity category, split into roughly one group
        per 40 chunks (~5 papers at ~8.5 chunks/paper), and each group yields
        one outline via the LLM; the groups are processed concurrently.

        :param chunks_keyinfo: list (one entry per chunk) of lists of
            key-info dicts.
        :param survey_topic: survey topic string.
        :param survey_lang: "English" or "Chinese".
        :param survey_type: survey type id used to index prompts/samples.
        :return: list of outline texts, one per group.
        """

        async def generate_single_outline(keyinfo_in_category, survey_topic, survey_lang, survey_type):
            # Render the categorised key info as markdown-like text.
            # (Join a list of parts instead of quadratic string +=.)
            parts = []
            for category, keyinfos in keyinfo_in_category.items():
                parts.append(f"## {category} \n\n")
                for keyinfo in keyinfos:
                    chunk_number = keyinfo.get("chunk_number4outline", "unknown")
                    entity_name = keyinfo.get("entity_name", "unknown")
                    parts.append(f"[{chunk_number}] **{entity_name}**\n")
            concated_keyinfo = "".join(parts)
            # Build the outline-generation prompt.
            samples = generate_outline_samples[survey_type][survey_lang]
            gen_outline_input = generate_outline_prompts_v2[survey_lang] % (
                survey_topic,
                survey_type_int2str[survey_lang][survey_type],
                samples[0],
                concated_keyinfo,
                outline_type_instructions[survey_type][survey_lang])
            start = time.time()
            response_text = await self.chat_async(gen_outline_input, response_format_type="text", language=survey_lang,
                                                  do_sample=False, max_tokens=4095)
            print(f"生成一份初步大纲耗时: {time.time() - start}s")
            return response_text

        # Group key info by entity category, tagging each entry with the
        # number of the chunk it came from (used for citations later).
        keyinfo_in_category = {}
        for chunk_number, chunk in enumerate(chunks_keyinfo):
            for keyinfo in chunk:
                try:
                    keyinfo["chunk_number4outline"] = chunk_number
                    category = keyinfo.get("entity_category", "unknown")
                    keyinfo_in_category.setdefault(category, []).append(keyinfo)
                except Exception as e:
                    # Best-effort: skip a malformed entry rather than failing.
                    print(e)
                    continue
        # One outline per ~40 chunks (rounded), at least one outline.
        num_of_outline = max(int(0.5 + len(chunks_keyinfo) / 40), 1)
        # Randomly split the categorised key info into num_of_outline shares.
        keyinfo4outlines = divide_data(keyinfo_in_category, num_of_outline)
        tasks_generate_outline = [generate_single_outline(keyinfo4outline, survey_topic, survey_lang, survey_type)
                                  for keyinfo4outline in keyinfo4outlines]
        outlines = await asyncio.gather(*tasks_generate_outline)
        return outlines

    def GenerateOutlineTitleText(self, papers_keyinfo, survey_topic, survey_lang, survey_type):
        """Build an outline from per-paper key info via a single LLM call.

        :param papers_keyinfo: list of per-paper key-info dicts; the
            "paper_id" field is excluded from the prompt.
        :param survey_topic: survey topic string.
        :param survey_lang: "English" or "Chinese".
        :param survey_type: survey type id used to index prompts/samples.
        :return: the outline text, or None (implicitly) if the chat call fails.
        """
        # Concatenate every field except the paper id, one paper at a time.
        papers_keyinfo_str = []
        for paper_keyinfo in papers_keyinfo:
            fields = [value + "\n" for key, value in paper_keyinfo.items()
                      if key != "paper_id"]
            papers_keyinfo_str.append("".join(fields))

        concat_words = """##以下是从第%d篇文献中提取出来的关键信息\n"""
        # Prefix each paper's block with a numbered header line.
        concated_papers_keyinfo = ''.join(
            concat_words % (i + 1) + paper_str
            for i, paper_str in enumerate(papers_keyinfo_str))

        # Assemble the outline-generation prompt (topic, type, 3 samples,
        # then all the paper key info).
        samples = generate_outline_samples[survey_type][survey_lang]
        gen_outline_input = generate_outline_prompts[survey_lang] % (
            survey_topic,
            survey_type_int2str[survey_lang][survey_type],
            samples[0],
            samples[1],
            samples[2],
            concated_papers_keyinfo)
        try:
            return self.chat(gen_outline_input, response_format_type="text",
                             language=survey_lang, do_sample=False)
        except Exception as e:
            # Matches original behaviour: log the error, fall through to None.
            print(e)
    def MergeOutlinesText(self, outlines_text, survey_lang, survey_type, survey_topic):
        """Merge several candidate outlines into one, then normalise its
        formatting so later parsing is reliable.

        :param outlines_text: list of outline text strings to merge.
        :param survey_lang: "English" or "Chinese".
        :param survey_type: survey type id used to pick the output sample.
        :param survey_topic: survey topic string (currently unused here).
        :return: the merged, re-formatted outline text.
        """
        start = time.time()
        # Language-specific header inserted before each input outline.
        concat_words = {"Chinese": "\n输入大纲{number}\n\n", "English": "\n## Input Outline{number}\n\n"}
        concat_word = concat_words[survey_lang]
        numbered_outlines = [
            concat_word.replace("{number}", str(idx + 1)) + text
            for idx, text in enumerate(outlines_text)
        ]
        outlines_input = "".join(numbered_outlines)
        output_sample = generate_outline_samples[survey_type][survey_lang][0]
        merge_outline_input = merge_outlines_prompts[survey_lang] % (outlines_input, output_sample)

        # First LLM pass: merge the outlines into one.
        outline_text_merged = self.chat(merge_outline_input, response_format_type="text", language=survey_lang,
                                        do_sample=False, max_tokens=4095)
        end = time.time()
        print(f"合并多个提纲耗时: {end - start}s")
        # Second LLM pass: normalise formatting for downstream parsing.
        # TODO: if time allows, make this async and format line-by-line.
        fomatize_outline_input = fomatize_outline_prompt[survey_lang] % outline_text_merged
        outline_text_merged_fomatized = self.chat(fomatize_outline_input, response_format_type="text",
                                                  language=survey_lang,
                                                  do_sample=False, max_tokens=4095)
        end_1 = time.time()
        print(f"格式化提纲耗时: {end_1 - end}s")
        return outline_text_merged_fomatized

    async def GenerateDescription(self, survey_lang, survey_topic, outline, chunks_keyinfo, survey_type, semaphore):
        """Attach the 20 key-info entries most similar to each leaf section's
        title, then generate a short description for every leaf via the LLM.

        :param survey_lang: "English" or "Chinese".
        :param survey_topic: survey topic string.
        :param outline: Outline; leaf nodes must carry 'title' and 'title_emb'
            in their data dict; 'reference' and 'description' are written.
        :param chunks_keyinfo: per-chunk lists of key-info dicts, each with an
            'entity_name_emb' embedding (assumed 2048-dim — TODO confirm).
        :param survey_type: unused here; kept for interface compatibility.
        :param semaphore: asyncio.Semaphore bounding concurrent LLM calls.
        """

        def cosine_similarity(vector1, vector2):
            # Robustness fix: the original divided by the product of norms
            # unconditionally, yielding nan/inf for a zero vector; return 0.0
            # (no similarity) in that case instead.
            vector1 = np.asarray(vector1)
            vector2 = np.asarray(vector2)
            denom = np.linalg.norm(vector1) * np.linalg.norm(vector2)
            if denom == 0:
                return 0.0
            return np.dot(vector1, vector2) / denom

        async def GenerateDescription4SingleLeaf(outline_text, leaf, samples, semaphore):
            # Render the leaf's reference key info as bullet lines.
            ref_keyinfos = ""
            for keyinfo in leaf.data.get("reference", []):
                ref_keyinfos += f"- **{keyinfo[1]['entity_name']}** :{keyinfo[1]['descriptive_linkage']}\n"
            leaf_id = leaf.identifier.replace("_", ".")
            leaf_title = leaf.data.get("title", "")
            sample_str = "\n".join(random.sample(samples, 3))  # 3-shot prompt
            # Build the prompt fed to the LLM.
            gen_description_input = generate_description_prompts[survey_lang] % (
                survey_topic, f"{leaf_id} {leaf_title}", leaf_title, ref_keyinfos, sample_str, outline_text)
            async with semaphore:
                leaf_text_des = await self.chat_async(user_input=gen_description_input,
                                                      response_format_type="text", language=survey_lang,
                                                      do_sample=False)
                leaf.data["description"] = leaf_text_des

        outline_leaf_nodes = outline.getSubTreeLeaves()
        # Flatten the per-chunk key info into one list of entities so each
        # leaf can be matched against all of them by similarity.
        keyinfo_entities = []
        for chunk in chunks_keyinfo:
            keyinfo_entities.extend(chunk)
        for leaf in outline_leaf_nodes:
            title_emb = np.array(leaf.data['title_emb'])
            keyinfos_with_similarity = []
            for keyinfo_entity in keyinfo_entities:
                # Missing embeddings fall back to a near-zero vector so they
                # rank last rather than raising.
                entity_emb = np.array(keyinfo_entity.get('entity_name_emb', [1e-6] * 2048))
                similarity = cosine_similarity(title_emb, entity_emb)
                keyinfos_with_similarity.append((similarity, keyinfo_entity))
            # Keep the top-20 most similar (similarity, keyinfo) tuples.
            top_k = sorted(keyinfos_with_similarity, key=lambda x: x[0], reverse=True)[:20]
            leaf.data["reference"] = top_k

        outline_text = outline.getSurveyContent()
        # Few-shot input/output samples for the description prompt.
        samples = generate_description_samples[survey_lang]
        tasks = [GenerateDescription4SingleLeaf(outline_text, leaf, samples, semaphore) for leaf in outline_leaf_nodes]
        await asyncio.gather(*tasks)

    async def GenerateSectionDraft(self, outline, topic, survey_lang, reference_chunks_c):
        """Write draft content for every section of the outline, bottom-up.

        Order of writing:
          1. leaf sections (concurrently, semaphore-limited),
          2. the leaves' direct parents ("second-to-last level"),
          3. any still-blank sections under the top-level chapters.

        :param outline: Outline tree; node.data carries 'title',
            'description' and 'reference', and receives 'content'.
        :param topic: survey topic string.
        :param survey_lang: "English" or "Chinese".
        :param reference_chunks_c: list of chunk dicts ('chunk_ext_id',
            'chunk_claims') matched against each leaf's references.
        """

        def getClaimStr(section):
            """Render the claims of at most 20 (randomly sampled) reference
            chunks of *section* as one string.

            Bug fix: the original first appended the claims of EVERY
            reference chunk and then appended the sampled subset again,
            duplicating claims in the prompt and defeating the 20-chunk cap.
            Only the sampled subset is rendered now.
            """
            if len(section.data['reference']) > 20:
                ref_chunks = random.sample(section.data['reference'], 20)
            else:
                ref_chunks = section.data['reference']
            claims_str = ''
            for ref_chunk in ref_chunks:
                claims_str += f"**第{ref_chunk['chunk_internal_id']}篇参考文献中的关键内容**\n" + '\n'.join(
                    ref_chunk['chunk_claims']) + '\n'
            return claims_str

        def getClaimInfo(reference_chunks_c, outline):
            """Attach chunk claims to leaf references and propagate
            deduplicated references up to ancestor nodes (post-order)."""

            def modify_func(node_id, reference_chunks_c, outline):
                node = outline.get_node(node_id)
                if len(outline.get_children(node_id)) == 0:
                    # Leaf node: look up each reference in the global chunk
                    # list to copy its claims and record its list position as
                    # the internal id used in prompts.
                    for index, ref_chunk in enumerate(node.data['reference']):
                        for idx, ref_chunk_in_list in enumerate(reference_chunks_c):
                            if ref_chunk_in_list['chunk_ext_id'] == ref_chunk['chunk_ext_id']:
                                node.data['reference'][index]['chunk_claims'] = ref_chunk_in_list['chunk_claims']
                                node.data['reference'][index]['chunk_internal_id'] = idx
                else:
                    # Internal node: its 'reference' is still unset at this
                    # point; gather the children's references, deduplicated
                    # by external chunk id.
                    node.data['reference'] = []
                    ext_ids = []
                    for child in outline.get_children(node_id):
                        for ref_chunk_of_child in child.data['reference']:
                            if ref_chunk_of_child['chunk_ext_id'] not in ext_ids:
                                ext_ids.append(ref_chunk_of_child['chunk_ext_id'])
                                node.data['reference'].append(ref_chunk_of_child)

            def reverse_order_traversal(node_id, reference_chunks_c, outline):
                """Bottom-up traversal: process all children before the node
                itself, so parents can aggregate children's references."""
                if outline.get_node(node_id) is None:
                    return
                for child in outline.get_children(node_id):
                    reverse_order_traversal(child.identifier, reference_chunks_c, outline)
                modify_func(node_id, reference_chunks_c, outline)

            reverse_order_traversal(outline.root, reference_chunks_c, outline)

        async def process_leaf(semaphore, outline, identifier):
            # Concurrency-limited wrapper around leaf content generation.
            async with semaphore:
                await gen_leaf_content(outline, identifier)

        async def gen_leaf_content(outline, identifier):
            """Generate the body text of one leaf section via the LLM."""
            section_leaf = outline.get_node(identifier)
            # Claims of the section's (sampled) reference chunks.
            claims_str = getClaimStr(section_leaf)
            generate_draft_input = generate_draft_prompts[survey_lang] % (
                topic,
                outline.showSurveyOutline(format=1),
                section_leaf.data['title'],
                section_leaf.data['description'],
                claims_str
            )
            try:
                response_text = await self.chat_async(
                    generate_draft_input,
                    response_format_type="text",
                    language=survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                outline.get_node(section_leaf.identifier).data['content'] = response_text
            except Exception as e:
                # Best-effort: a failed section is logged and left unwritten.
                print(f"Error processing leaf {section_leaf.identifier}: {e}")

        async def process_2rd_leaf(semaphore, outline, identifier):
            # Concurrency-limited wrapper for second-to-last-level sections.
            async with semaphore:
                await gen_leaf_2rd_content(outline, identifier)

        async def gen_leaf_2rd_content(outline, identifier):
            """Generate the text of a second-to-last-level section, using its
            children's already-written content as context."""
            section_2rd_leaf = outline.get_node(identifier)
            children_section_content = outline.getSurveyContent(section_2rd_leaf.identifier)
            claims_str = getClaimStr(section_2rd_leaf)
            generate_2rd_draft_input = generate_2rd_draft_prompts[survey_lang] % (
                section_2rd_leaf.data['title'],
                children_section_content,
                topic,
                outline.showSurveyOutline(format=1),
                claims_str,
                section_2rd_leaf.data['title'],
                section_2rd_leaf.data['description'],
                topic,
                section_2rd_leaf.data['title']
            )
            try:
                response_text = await self.chat_async(
                    generate_2rd_draft_input,
                    response_format_type="text",
                    language=survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                outline.get_node(section_2rd_leaf.identifier).data['content'] = response_text
            except Exception as e:
                print(f"Error processing 2nd-level leaf {section_2rd_leaf.identifier}: {e}")

        async def process_section(semaphore, section):
            # Concurrency-limited wrapper for top-level chapter completion.
            async with semaphore:
                await gen_section_content(section)

        async def gen_section_content(section):
            """Fill any still-empty ('content' == "") sections in the subtree
            rooted at *section*."""
            subtree = outline.subtree(section.identifier)
            for blank_node in subtree.all_nodes_itr():
                # Only nodes whose content was initialised to the empty
                # string (missing or None content is skipped, as before).
                if isinstance(blank_node.data, dict) and blank_node.data.get('content') == "":
                    blank_node_content = outline.getSurveyContent(blank_node.identifier)
                    completion_draft_input = completion_draft_prompts[survey_lang] % (
                        blank_node.data['title'],
                        blank_node_content,
                        topic,
                        outline.showSurveyOutline(format=1),
                        topic,
                        blank_node.data['title']
                    )
                    try:
                        response_text = await self.chat_async(
                            completion_draft_input,
                            response_format_type="text",
                            language=survey_lang,
                            do_sample=False,
                            max_tokens=4095
                        )
                        outline.get_node(blank_node.identifier).data['content'] = response_text
                    except Exception as e:
                        print(f"Error processing leaf {blank_node.identifier}: {e}")

        # Attach claim indices to every node before writing anything.
        print("将声明索引赋给各个节点")
        getClaimInfo(reference_chunks_c, outline)

        # Stage 1: leaf sections, up to 35 in flight.
        print("撰写叶子节点内容")
        semaphore_1 = asyncio.Semaphore(35)
        leaves = outline.getSubTreeLeaves()
        tasks_1 = [process_leaf(semaphore_1, outline, leaf.identifier) for leaf in leaves]
        await asyncio.gather(*tasks_1)

        # Stage 2: parents of leaves (excluding root), deduplicated.
        print("撰写倒数第二级叶子节点内容")
        semaphore_2 = asyncio.Semaphore(20)
        section_2rd_leaves = []
        for section_leaf in leaves:
            parent_section = outline.parent(section_leaf.identifier)
            if parent_section.identifier != 'root' and parent_section not in section_2rd_leaves:
                section_2rd_leaves.append(parent_section)
        tasks_2 = [process_2rd_leaf(semaphore_2, outline, leaf_2rd.identifier) for leaf_2rd in section_2rd_leaves]
        await asyncio.gather(*tasks_2)

        # Stage 3: complete any remaining blank sections per top-level chapter.
        print("撰写剩余章节内容")
        semaphore_3 = asyncio.Semaphore(15)
        sections = outline.get_children(outline.root)
        tasks_3 = [process_section(semaphore_3, section) for section in sections]
        await asyncio.gather(*tasks_3)

    async def GenerateTitleAbaCon(self, outline, topic, survey_lang, survey_type):
        """Generate the survey's title, abstract and conclusion concurrently.

        :param outline: Outline whose nodes carry 'title' and 'content'.
        :param topic: survey topic string.
        :param survey_lang: "English" or "Chinese".
        :param survey_type: survey type id, interpolated into the title prompt.
        :return: dict with keys 'title', 'abstract', 'conclusions'; a value
            is None when the corresponding generation failed.
        """

        def _flatten_outline(outline):
            """Flatten the outline once for both abstract and conclusion
            prompts (the original duplicated this block in each generator).

            :return: (outline_text, output_text) — numbered titles only, and
                numbered titles followed by their section content.
            """
            result_lines_1 = []  # "<tag> <title>\n<content>\n" per section
            result_lines_2 = []  # "<tag> <title>\n" per section
            for section in outline.all_nodes():
                section_identifier = section.tag  # section number, e.g. "2.1"
                title = section.data.get("title", "无标题")
                content = section.data.get("content", "无内容")
                result_lines_1.append(f"{section_identifier} {title}\n{content}\n")
                result_lines_2.append(f"{section_identifier} {title}\n")
            return "\n".join(result_lines_2), "\n".join(result_lines_1)

        async def GenerateTitle(topic, survey_lang, survey_type):
            # Free-form prompt; titles are short, so a small token cap is used.
            generate_title_input = f"Generate a title for the topic '{topic}' in {survey_lang} for a {survey_type} survey. Directly output the result."
            try:
                response_text = await self.chat_async(
                    generate_title_input,
                    response_format_type="text",
                    language=survey_lang,
                    do_sample=False,
                    max_tokens=100
                )
                return response_text
            except Exception as e:
                print(f"Error generating title: {e}")
                return None

        async def GenerateAbstract(outline, topic, survey_lang):
            # Abstract is generated from the outline plus all section content.
            outline_text, output_text = _flatten_outline(outline)
            generate_abstract_input = generate_draft_abstract[survey_lang] % (topic, outline_text, output_text)
            try:
                response_text = await self.chat_async(
                    generate_abstract_input,
                    response_format_type="text",
                    language=survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                return response_text.strip()
            except Exception as e:
                print(f"Error generating abstract: {e}")
                return None

        async def GenerateConclusions(outline, topic, survey_lang):
            # Conclusion uses the same flattened context as the abstract.
            outline_text, output_text = _flatten_outline(outline)
            generate_conclusions_input = generate_draft_conclusion[survey_lang] % (topic, outline_text, output_text)
            try:
                response_text = await self.chat_async(
                    generate_conclusions_input,
                    response_format_type="text",
                    language=survey_lang,
                    do_sample=False,
                    max_tokens=4095
                )
                return response_text
            except Exception as e:
                print(f"Error generating conclusions: {e}")
                return None

        # Run the three generations concurrently and collect the results.
        title, abstract, conclusions = await asyncio.gather(
            asyncio.create_task(GenerateTitle(topic, survey_lang, survey_type)),
            asyncio.create_task(GenerateAbstract(outline, topic, survey_lang)),
            asyncio.create_task(GenerateConclusions(outline, topic, survey_lang)),
        )
        return {'title': title, 'abstract': abstract, 'conclusions': conclusions}

    def GenerateRefList(self, outline):
        """Renumber in-text citations and append a global reference list.

        Walks the outline twice: pass 1 collects every reference chunk that
        was actually used anywhere in the tree and assigns it a stable
        1-based global number (first-use order); pass 2 rewrites each node's
        node-local ``<sup>n</sup>`` markers to those global numbers.

        Changes vs. the original: the dead inner helper ``replace_with_rank``
        (only referenced from commented-out code) and the commented-out code
        itself were removed; ``type(x) == str`` became ``isinstance``; the
        ``src_chunk_info`` fallback is now ``{}`` (a ``""`` default would
        crash on the subsequent ``.get`` calls).

        :param outline: Outline whose nodes carry 'used_ref', 'listed_ref'
            and 'content' in their data dicts.
        :return: the full survey text with a "# References" section appended.
        """
        used_ref_chunks = []            # reference chunk dicts, first-use order
        used_ref_chunks_id = []         # synthetic ids: "<paper_title>_<chunk_id>"
        used_ref_chunks_id2number = {}  # synthetic id -> final 1-based number
        # Pass 1: collect every reference used anywhere in the tree.
        for node in outline.all_nodes_itr():
            used_ref_dict = node.data["used_ref"]  # dict of keyinfo chunks
            for ref_info in used_ref_dict.values():
                ref_chunk_id = f"""{ref_info["src_chunk_info"]["paper_title"]}_{ref_info["src_chunk_info"]["chunk_id"]}"""
                # Record only the first occurrence of each reference.
                if ref_chunk_id not in used_ref_chunks_id:
                    used_ref_chunks_id.append(ref_chunk_id)
                    used_ref_chunks_id2number[ref_chunk_id] = len(used_ref_chunks_id)
                    used_ref_chunks.append(ref_info)
        # Pass 2: rewrite node-local <sup>n</sup> markers to global numbers.
        for node in outline.all_nodes_itr():
            content = node.data["content"]
            listed_ref = node.data["listed_ref"]  # list of candidate refs
            if isinstance(content, str):
                used_index_in_text = extract_sup_numbers(content)
                # In-text numbers are 1-based; listed_ref indices are 0-based.
                used_index = [i - 1 for i in used_index_in_text]
                for index in used_index:
                    if 0 <= index < len(listed_ref):
                        ref_info = listed_ref[index]
                        src_chunk_info = ref_info.get("src_chunk_info", {})
                        ref_chunk_id = f"""{src_chunk_info.get("paper_title", "")}_{src_chunk_info.get("chunk_id","")}"""
                        if ref_chunk_id in used_ref_chunks_id2number:
                            target_number = used_ref_chunks_id2number[ref_chunk_id]
                            content = content.replace(f'<sup>{index + 1}</sup>', f'<sup>{target_number}</sup>')
                            node.data["content"] = content
        # Build the reference list in global-number order.
        references_str = "\n\n# References\n\n"
        for index, used_ref in enumerate(used_ref_chunks):
            number = index + 1
            src_chunk_info = used_ref.get("src_chunk_info", {})
            title = src_chunk_info.get("paper_title", "")
            source = src_chunk_info.get("conference_name", "")
            year = str(src_chunk_info.get("year", ""))
            chunk_id = str(src_chunk_info.get("chunk_id", ""))
            references_str += f"[{number}] {title} {source}, {year}, chunk {chunk_id}\n"
        final_content = outline.getSurveyContent() + references_str
        # The root heading renders as "## root ..."; strip the synthetic name.
        if final_content.startswith('## root'):
            final_content = '## ' + final_content[7:]
        return final_content

    async def clean_wrong_references(self, input_md, claims, id, output_md):
        """
        Keep ``<sup>id</sup>`` only on the verified claim; strip ``<sup>id</sup>``
        from every statement that does not match it. Tags for other reference ids
        are left untouched.

        :param input_md: path of the Markdown file to read
        :param claims: the correct claim whose citation should be kept (string)
        :param id: the reference number being filtered (integer)
        :param output_md: path the cleaned Markdown is written to
        :return: None — the cleaned text is written to ``output_md``
        """
        async with aiofiles.open(input_md, "r", encoding="utf-8") as f:
            md_text = await f.read()

        # A "statement" is the shortest run without '.' or '<' directly before the tag.
        pattern = rf"([^.<]*?)\s*<sup>{id}</sup>"
        # Drop a trailing period so "claim." and "claim" compare equal below.
        claims = claims.rstrip('.')

        def replace_func(match):
            statement = match.group(1).strip()
            # Keep the tag only when the statement is exactly the verified claim.
            if statement == claims.strip():
                return f"{statement} <sup>{id}</sup>"
            else:
                return statement  # drop the incorrect <sup>id</sup> tag

        # Rewrite the Markdown, deciding per occurrence whether to keep the tag.
        cleaned_md = re.sub(pattern, replace_func, md_text)

        async with aiofiles.open(output_md, "w", encoding="utf-8") as f:
            await f.write(cleaned_md)
        print(f"更新 output_md 完成, <sup>{id}</sup>已优化")

    async def process_batch(self, input_process_md, output_process_md, statement_dict, references_dict, outline, start,
                            end):
        """
        Verify and clean citations for references ``start`` (inclusive) through
        ``end`` (exclusive).

        For each reference number ``i`` the method locates the matching chunk text
        in the outline's leaf nodes, asks the model which of the collected
        statements the chunk actually supports, and rewrites the Markdown so only
        supported statements keep their ``<sup>i</sup>`` tag.

        :param input_process_md: path of the Markdown file read by the cleaner
        :param output_process_md: path the cleaned Markdown is written to
        :param statement_dict: mapping reference number -> list of citing statements
        :param references_dict: mapping reference number -> (paper title, chunk id)
        :param outline: outline tree whose leaves carry ``data['reference']`` chunks
        :param start: first reference number to process (inclusive)
        :param end: one past the last reference number to process (exclusive)
        """
        for i in range(start, end):
            title, chunk_id = references_dict[i]
            # Find the chunk whose paper title and chunk id match this reference.
            chunk_text = ''
            for leafnode in outline.getSubTreeLeaves():
                for ref in leafnode.data['reference']:
                    if title == ref['paper_title'] and chunk_id == ref['chunk_id']:
                        chunk_text = ref['chunk_content']
                        break
            # Join the statements as a numbered list (1., 2., ...) for the prompt.
            statements = statement_dict.get(i, [])
            statements_str = "\n".join(f"{n + 1}. {statement}" for n, statement in enumerate(statements))
            youhua_claims_number_prompts = youhua_claims_number["English"] % (
                title, statements_str, chunk_text)
            print(youhua_claims_number_prompts)

            try:
                response_text = await self.chat_async(
                    youhua_claims_number_prompts,
                    response_format_type="text",
                    language="English",
                    do_sample=False,
                    max_tokens=500  # verdicts tend to be long
                )
                await self.clean_wrong_references(input_process_md, response_text, i, output_process_md)
            except Exception as e:
                # Log and continue so one failing reference does not abort the batch.
                print(f"Error generating conclusions for <sup>{i}</sup>: {e}")

    async def process_md(self, input_process_md, outline, output_process_md):
        """
        Verify and optimize all reference citations in a generated survey file.

        Scans the Markdown for claims followed by one or more ``<sup>n</sup>``
        citation tags (stacked tags such as ``claim<sup>1</sup><sup>2</sup>`` are
        handled), parses the ``# References`` section into a
        number -> (title, chunk id) map, then verifies the references concurrently
        in batches of 30 via :meth:`process_batch`.

        :param input_process_md: path of the Markdown file to verify
        :param outline: outline tree whose leaves carry the reference chunks
        :param output_process_md: path the cleaned Markdown is written to
        :raises ValueError: if the file has no ``# References`` section
        """
        async with aiofiles.open(input_process_md, "r", encoding="utf-8") as f:
            md_text = await f.read()

        # Group statements by the reference numbers that cite them.
        pattern = r"([^.!\n<]*?)\s*(<sup>\d+</sup>(?:<sup>\d+</sup>)*)"
        statement_dict = {}
        for statement, ref_tags in re.findall(pattern, md_text):
            # One statement may carry several stacked <sup> tags.
            for ref_num in re.findall(r"<sup>(\d+)</sup>", ref_tags):
                statement_dict.setdefault(int(ref_num), []).append(statement.strip())
        print(statement_dict)

        # Locate the References section; everything after the heading is parsed.
        references_section = re.search(r"# References\n(.*)", md_text, re.DOTALL)
        if references_section is None:
            raise ValueError("No references section found in the markdown file.")
        references_text = references_section.group(1).strip()

        # Extract "[num] title. ... chunk id" entries into {num: (title, chunk)}.
        pattern1 = r"\[(\d+)\] (.*?)\. .*?chunk (\d+)"
        matches = re.findall(pattern1, references_text)
        references_dict = {int(num): (title.strip(), int(chunk)) for num, title, chunk in matches}
        print(references_dict)

        # Fan out the work in concurrent batches of 30 references each.
        batch_size = 30
        tasks = [
            self.process_batch(input_process_md, output_process_md, statement_dict, references_dict, outline, i,
                               min(i + batch_size, len(references_dict) + 1))
            for i in range(1, len(references_dict) + 1, batch_size)
        ]
        await asyncio.gather(*tasks)
        print("优化完成")


if __name__ == "__main__":
    # title = "提升大模型的规划能力：综述"
    title = "Enhancing Planning Abilities of Large Models: A Comparative Analysis of Strategies"
    #
    #     O = parseText2Outline(title, outline_des)
    #     outline_list = extract_descriptions(O)
    #     chunk_claim = {"646edc9cd68f896efaddac94_0": ["### 2.4 大模型中的规划能力", "#### 3.1.1 强化学习"],
    #                    "646edc9cd68f896efaddac94_1": ["#### 3.3.1 强化学习与监督学习的结合", "### 3.2.1 分层规划"],
    #                    "646edc9cd68f896efaddac94_2": ["#### 3.1.1 强化学习", "#### 3.1.3 迁移学习"],
    #                    "646edc9cd68f896efaddac93_3": ["#### 3.1.1 强化学习", "#### 3.2.1 分层规划"]}
    #     claims_in_papers = [
    #         {
    #             "paper_id": "646edc9cd68f896efaddac94",
    #             "paper_title": "abcd",
    #             "chunk_ext_id": ['a', 'b', 'c'],
    #             "chunk_claims": [['a', 'b', 'c', 'd'], ['a', 'b'], ['a', 'b', 'c']]
    #         },
    #         {
    #             "paper_id": "646edc9cd68f896efaddac93",
    #             "paper_title": "abcd",
    #             "chunk_ext_id": ['a', 'b', 'c', 'd'],
    #             "chunk_claims": [['a', 'b', 'c', 'd'], ['a', 'b'], ['a', 'b', 'c'], ['a']]
    #         }
    #     ]
    #     section_claim = claim_format_transform(chunk_claim, claims_in_papers, O)
    #     print(section_claim)
    # print(O.show(line_type="ascii", stdout=False))
    # print(O.showSurveyOutline(format=1, section_identifier='1'))

    t = Outline()

    r = Section(section_identifier="root", title="A survey on deep 3D human pose estimation", description="",
                reference='',
                content="")
    s1 = Section(section_identifier="1", title="引言",
                 description='介绍了大型语言模型（LLMs）的兴起，LLMs在规划能力方面的挑战，提升LLMs规划能力的重要性，以及本文综述的目的和结构。',
                 reference='',
                 content="""The chapter "Introduction" sets the stage for an in-depth exploration of Large Language Models (LLMs) and their capabilities, particularly focusing on the challenges and advancements in planning. The chapter begins by highlighting the rise of LLMs, which have revolutionized natural language processing and understanding through their expanded capabilities in language understanding and generation. These models, such as GPT-3, Bloom, and PaLM, have demonstrated remarkable progress in various tasks, including semantic understanding, dialogue generation, programming, and mathematical problem-solving <sup>1</sup><sup>8</sup>. The introduction of instruction tuning has further enhanced their ability to follow complex user instructions and engage in conversational applications <sup>1</sup>.

However, despite these advancements, LLMs face significant challenges in planning. The lack of an internal world model hinders their ability to predict world states and simulate long-term outcomes of actions, limiting their capacity for deliberate planning <sup>14</sup>. Additionally, LLMs struggle to follow complex user instructions, which is addressed by fine-tuning models on instruction tuning datasets <sup>10</sup>. The difficulty in making predictions specific to tasks or users, due to limitations in fine-tuning and prompt size/context windows, further restricts their flexibility and adaptability in planning <sup>11</sup>.

To address these challenges, researchers have explored various approaches. Fine-tuning LLMs on instruction tuning datasets improves their ability to follow complex user instructions <sup>10</sup>. Incorporating object labels as input or employing costly multi-turn reasoning helps address object hallucinations in multimodal models <sup>10</sup>. Training selection functions on top of LLMs tackles the challenge of abstaining from answering <sup>10</sup>. These approaches aim to enhance LLMs' reasoning and decision-making capabilities, thereby improving their planning abilities.

The subsequent chapters will delve deeper into the methods to enhance LLMs' planning abilities, including knowledge distillation and other techniques. The review paper will provide a comprehensive overview of the current state of LLMs, their limitations, and the ongoing research efforts to overcome these limitations. By understanding the challenges and advancements in LLMs' planning capabilities, we can gain insights into the future development and application of these powerful models in various domains.""")
    s2 = Section(section_identifier="1_1", title="大型语言模型（LLMs）的兴起", description='阐述了LLMs在自然语言处理领域的快速发展，以及其在各种任务中的广泛应用。',
                 reference=[
                     {
                         'chunk_content': '# 6. Conclusion\nThis paper presents S CI BENCH , a college-level benchmark that includes scientific problems from Mathematics, Physics, and Chemistry, as well as exam questions in Computer Science and Mathematics. Our comprehensive evaluation includes a diverse array of Large Language Models (LLMs), spanning both open-source and proprietary models, including unimodal as well as multimodal settings, and employing a variety of prompting strategies. The evaluation protocol we employ serves as a framework for evaluating advanced problem-solving skills of LLMs in scientific domains. The findings of this study highlight that while large language models (LLMs) exhibit impressive performance on introductory mathematical benchmarks, their mastery of problem solving ability remains weak. These findings underscore the limitations of current LLMs in achieving satisfactory performance, even with the assistance of various tools. We envision that the S CI BENCH benchmark dataset and evaluation protocol presented in this paper could lay a foundation for future research and enable advancements in understanding and enhancing problem-solving capabilities of LLMs.\n\n# Reproducibility Statement\nTo foster reproducible research, we include all dataset processing and experiment details of S CI BENCH . We detail data processing in Section 3 and provide the UI design of data collection in Appendix A.3 . We include all experiment details with LLM prompts in Appendix C. Finally, we make our dataset and code publicly available at this repository .\n\n# Ethical Statement\nThe questions of S CI BENCH are sourced from science textbooks and exams. We conduct a manual examination of our dataset to ensure the absence of potential sensitive background or ethical concerns. The inclusion of exam questions has been authorized by the instructors of the respective courses.  \n\nThe purpose of the textbook dataset is solely for academic use. 
Its collection adheres to the Fair Use Law in the US, where only a certain number of questions from each textbook are selected, ensuring that only a small portion of the textbook is utilized.\n\n# Impact Statement\nThe introduction of S CI BENCH represents a significant advancement in the evaluation of Large Language Models (LLMs) for scientific problem-solving tasks. By focusing on collegiate-level problems in mathematics, chemistry, and physics, S CI BENCH addresses a critical gap in existing benchmarks, which have primarily focused on high-school subjects and basic algebraic operations. This development underscores the necessity of developing specialized benchmarks that challenge LLMs with higher complexity problems, thereby pushing the boundaries of the capabilities of LLMs in academic and research settings.  \n\nWhile the current scope of S CI BENCH encompasses a select group of scientific disciplines, the potential for future extensions is vast. Incorporating additional subjects such as biology, computer science, and engineering could provide a more comprehensive understanding of LLM capabilities across a broader spectrum of scientific knowledge. Moreover, extending the benchmark to social sciences, humanities, and other human-centric domains would be equally beneficial, as these areas often involve nuanced reasoning and interpretation of complex social dynamics and ethical considerations, posing unique challenges that could further enhance the versatility and applicability of LLMs.\n\n# Acknowledgements\nThis work was supported by the National Science Foundation (NSF) under Grant Nos. 1829071, 1937599, 2106859, 2119643, 2202693, 2211557, 2303037, and 2312501; the National Institutes of Health (NIH) under Grant No. U54HG012517; the Defense Advanced Research Projects Agency (DARPA) under Grant No. 
HR00112490370; NASA; SRC JUMP 2.0 Center; Amazon Research Awards; and Snapchat Gifts.\n\n\n\n# Supplementary Material for S CI BENCH\n\n# A The Textbook Dataset 12\nA.1 Textbook Sources . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 12   \nA.2 Textbook Examples . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 13   \nA.3 UI Design of the Labeling Tool . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 14\n\n# BThe Exam Dataset 14\n\n# C Experimental Details\nC.1 Prompts . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 18   \nC.2 Implementation Details . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 20   \nC.3 Additional Experiment on Wolfram Language . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 20   \nD.1 Assessment of the Evaluation Protocol . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 21   \nD.2 Examples . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 21',
                         'paper_title': 'SciBench: Evaluating College-Level Scientific Problem-Solving Abilities of Large Language Models',
                         'source_name': 'ICML', 'year': '2024', 'chunk_id': 6,
                         'chunk_ext_id': 454845757413944800},
                     {
                         'chunk_content': '# 1 Introduction\nIn recent years, the natural language processing field has seen remarkable advancements in the development of increasingly large language models (LLMs). LLMs such as GPT-3 [Brown et al., 2020], Bloom [Scao et al., 2022], PaLM [Chowdhery et al., 2022], Megatron-Turing-530B [Smith et al., 2022], Chinchilla [Hoffmann et al., 2022], and others have expanded the horizons of language understanding and generation. These neural networks, with hundreds of billions of parameters, exhibit human-like proficiency in complex reasoning [Wei et al., 2022, Wang et al., 2022b, Zhou et al., 2022, Zhang et al., 2022b, Diao et al., 2023b, Shum et al., 2023]. However, the most powerful large models are closed-source [Liang et al., 2022], limiting their accessibility and hindering progress in the field. In response, Meta’s LLaMA [Touvron et al., 2023] offers a suite of powerful open-source models that bolster language model research. Recent works, such as Alpaca [Taori et al., 2023], Vicuna [Chiang et al., 2023], and LMFlow [Diao et al., 2023a], have demonstrated impressive capabilities in instruction-following tasks and conversational applications after instruction tuning. Simultaneously, breakthroughs in the image and multimodal processing, as exemplified by models like LLAVA Liu et al. [2023a] and MiniGPT-4 Zhu et al. [2023], have facilitated image-based interactions with robotic systems. These cutting-edge innovations are highly promising for a diverse array of applications across numerous fields.  \n\nThe field of embodied AI / robotics is set to experience a significant transformation with the rise of powerful LLMs and multi-modal models. This is due to the heavy reliance on human-robot interactions, as highlighted by studies such as [Shah et al., 2023, Brohan et al., 2022, Fang et al., 2020]. 
With the advancement of LLMs and multi-modal models, robots will be able to interpret human instructions and reason over visual scenes, enabling them to execute corresponding actions. This breakthrough will lead to the creation of intelligent robots that are more helpful to humans, as they can effectively communicate with humans using natural language instructions, enhancing their utility and accessibility for a wider range of users. This exciting development has implications for various fields, including healthcare, manufacturing, and entertainment.  \n\nHowever, it is important to note that while multi-modal models have made remarkable progress in conversing with multi-modal inputs, this alone is insufficient for robots to interact with the physical world effectively. To achieve this, robots must be able to accurately identify objects within visual scenes, which is a vital prerequisite for performing actions such as "moving" and "grasping" objects. This goal of "localizing objects" is closely linked to the field of object detection, which is one of the most fundamental and extensively studied research areas in computer vision. Conventional object detection systems, such as Faster-RCNN [Ren et al., 2015], Retina-Net [Lin et al., 2017], and YOLO [Redmon et al., 2016], have a fixed number of classification heads, which restricts practicality and confines predictions to only those classes that the model has been trained on. Recently, a series of open-vocabulary detection systems have emerged as the new trend [Gu et al., 2021, Li et al., 2022, Yao et al., 2022, Liu et al., 2023b]. Specifically, those models adopt the contrastive learning approach to align the object-level visual features with the textual class embeddings extracted from a pretrained text encoder (e.g., BERT [Devlin et al., 2019]). In this way, those models are able to correctly classify a much wider range of objects during inference.  
\n\nDespite the success achieved by open-vocabulary object detection systems, they still require humans to provide accurate categories of the objects to be detected, which is neither user-friendly nor realistic due to the following reasons: (1) Humans are not always capable of supplying accurate object categories due to limited memory or knowledge. For instance, a user may want to locate fruits rich in vitamin K but may lack the specific knowledge of which fruits meet this criterion. In such cases, it would be advantageous for the model to autonomously reason about vitamin K-rich fruits and accurately detect and identify them. (2) The object categories that humans can provide are inherently limited and non-exhaustive. For instance, when detecting external behaviors that may pose an impact or danger to autonomous vehicles in driving, humans may only enumerate a few scenarios, such as restricted visibility, complex pedestrian traffic, and sudden lane changes by preceding vehicles. If the query "detect objects that are dangerous to autonomous vehicles" is directly assigned to the detection model, it can employ its own knowledge to identify a broader range of dangerous behaviors. Human knowledge is restricted, and the object categories that can be listed are likewise finite. In summary, we claim that large language models (such as ChatGPT) are promising in assisting in multimodal reasoning. With powerful reasoning abilities, the instructions from humans can be simplified and the resulting answers will be more accurate and comprehensive.  \n\nIn light of the above limitations of object detection systems, we propose a new research task: reasoning-based object detection. In essence, humans provide abstract queries via natural language, and the model discerns and reasons which object in the image may fulfill the query, subsequently detecting them. We made preliminary explorations in this direction. 
Specifically, we fine-tune multimodal model built on LLMs to predict objects of interest based on user queries (instructions) and input images. We then provide the object names to an open-vocabulary detector for specific location prediction. This approach allows the model to analyze images and accurately predict the location of objects of interest based on user instructions. To facilitate the instruction-following ability of the model, we curate a high-quality fine-tuning dataset with 5000 images and around 30000 query-answer pairs, which is open-sourced for the research community.  \n\n  \n\nFigure 1: The illustration of reasoning-based object detection task. The detection system is able to interpret human instruction, reason about the visual scene with common sense knowledge, and finally output the objects of interest. During the process, the LLM acts as the brain, while the detector empowers the system with the ability to "see".',
                         'paper_title': 'DetGPT: Detect What You Need Via Reasoning', 'source_name': 'EMNLP',
                         'year': '2023', 'chunk_id': 1, 'chunk_ext_id': 454845652116719540},
                     {
                         'chunk_content': '# BRELATED WORK\nLMMs. The success of Large Language Models (LLMs) ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ;Hoffmann et al. ,2022 ;Touvron et al. ,2023b ) has spurred considerable efforts to extend the potential of these models to more modalities ( Chen et al. ,2022b ;2023 ;Huang et al. ,2023 ;Li et al. ,2023c ;Wang et al. ,2022c ). In particular, Large Multimodal Models (LMMs) ( Alayrac et al. ,2022 ), or multimodal models (beyond one modality) that train a large number of parameters (beyond 1B parameter) on large datasets (hundreds of millions of examples). Typical LMMs build on top of LLMs, with additional adaptation modules. These models mainly differ in the adaptation modules ( Shukor et al. ,2023a ;Li et al. ,2023c ), pretraining data ( Schuhmann et al. ,2021 ;Zhu et al. ,2023b ;Lauren c¸ on et al. ,2023 ), and initialization (LLMs). These LMMs surpass the performance of traditional finetuned multimodal models ( Li et al. ,2021 ;Shukor et al. ,2022 ;Dou et al. ,2021 ). Recently, a proprietary model called Flamingo ( Alayrac et al. ,2022 ), has been proposed, followed by several open source models such as Open Flamingo (OF) ( Awadalla et al. ,2023 ) and IDEFICS ( Lauren c¸ on et al. ,2023 ). While most LMMs are currently tailored to image-text tasks, many works have demonstrated the potential for extension to other modalities ( Shukor et al. ,2023a ;Girdhar et al. ,2023 ;Shukor et al. ,2023b ;Zhang et al. ,2023 ).  \n\nICL. One of the emerging abilities when scaling LLMs, is In Context Learning (ICL) ( Brown et al. ,2020 ;Dong et al. ,2022 ); the ability to adapt the model from demonstrations. Several works target the design of the context prompt to enhance ICL effectiveness ( Lu et al. ,2022 ;Liu et al. ,2022 ;Zhao et al. ,2021 ), and improve the model’s reasoning ability ( Wei et al. ,2022 ;Zhang et al. ,2022 ;Chen et al. ,2022a ). 
Few works have used ICL for aligning LLMs with human preferences, such as generating safer dialogue ( Meade et al. ,2023 ) and producing harmless, honest, and helpful text (Askell et al. ,2021 ). However, the investigation of ICL in the realm LMMs remains limited, where previous studies ( Tsimpoukelli et al. ,2021 ;Alayrac et al. ,2022 ;Huang et al. ,2023 ) mainly focused on adapting pretrained LMMs to solve general benchmarks like VQA, captioning, or classification.\n\n# CBACKGROUND ONLMMS AND BASELINE MODELS\nWe consider 10 different LMMs from OpenFlamingo (OF) ( Awadalla et al. ,2023 ) and IDEFICS (Lauren c¸ on et al. ,2023 ) as described in Table 1 . For OF models; the multimodal pretraining of all models are done on part of the documents from the Multimodal-C4 dataset ( Zhu et al. ,2023b ) and image-text pairs from the english LAION 2B ( Schuhmann et al. ,2022 ). OFv2-4B models are trained additionally on ChatGPT-generated data. Note that, the first version of OF (OFv1-9B) is trained on less data compared to OFv2 models. For IDEFICS; the multimodal pretraining is done on data from OBELICS ( Lauren c¸ on et al. ,2023 ), LAION ( Schuhmann et al. ,2022 ), Wikipedia ( Foundation )and PMD ( Singh et al. ,2022 ). IDEFICS (I) is trained additionally on several instruction-tuning datasets. The architectures of all models are similar, with the main difference in the model size and initialization (which LLM). Specifically, these models consist of a frozen decoder-only LLM ( e.g. ,LLaMA, MPT), frozen vision encoder followed by a perceiver resampler ( e.g. , CLIP-ViT) and gated cross-attention injected between LLM blocks. The learnable gate in cross-attentions helps to stabilize the early stage of the training.\n\n# DEVALUATION SETUP\nThe evaluation of all models are done with zero-shot (a la Flamingo; 2-shot without images) or few-shot ICL, without any finetuning. In the paper, when we refer to evaluation we usually mean to the zero-shot setup. 
For efficient inference, we use the accelerate library ( Gugger et al. ,2022 )from transformers, and run all OF models with float16 (which leads to very small degradation in performance compared to running with float32). For IDEFICS the inference is done with Bfloat16. For ICL, we follow the standard approach and randomly select the examples from the corresponding datasets. For each benchmark, we randomly sample a subset of examples and divide them into separate query and context examples. Each score that we report is the average of scores after repeating the experiment 3 times. We use the official open-source implementation provided by the models’ authors.\n\n# EBENCHMARKS AND METRICS\nCOCO ( Lin et al. ,2014 ) (object hallucination) is a widely used image captioning dataset. It consists of 118K images for training and 5K for validation and testing. Each image is humanannotated with 5 different captions. We use 5K examples from the validation set. This dataset is used to evaluate object hallucinations with the CHAIR metrics ( Rohrbach et al. ,2018 ). These metrics are based on comparing the textual objects in the generated captions to the actual objects present in the image (from the segmentation annotation of COCO images).  \n\nTDIUC ( Kafle & Kanan ,2017 ) (abstention) is a VQA dataset with 168K images and 1.6M questions divided into 12 types. The questions are imported from COCO, VQA, and Visual Genome in addition to some annotated questions. One type of them is absurd questions (366K nonsensical queries about the image). We sample 8K examples ( $22\\%$ of them absurd questions) for evaluation. To report the abstention metrics, we use the same metrics used in binary classification; accuracy and F1-score which is the harmonic mean of the precision and recall.  \n\nCREPE ( Ma et al. ,2023 ) (compositionality) is a large-scale benchmark to evaluate compositionality (productivity and systematicity) in vision-language models. 
Based on the visual genome dataset, they propose an automated pipeline to generate hard negative captions. In this work, we focus on systematicity. For HN-Atom, the hard negatives are created by replacing the objects, attributes, and relationships in the ground truth captions with an atomic foil ( e.g. , antonyms). For HN-Comp, they concatenate two compounds, and each one of them contains an atomic foil. We evaluate on 5K examples, randomly sampled from a test set designed for LAION (as the evaluated models use LAION during pretraining). The main difference to our work is that instead of image-text retrieval, we consider this benchmark as image-text matching (ITM) or image-text selection (ITS; where the model is given a correct and incorrect caption and the task is to select which one describes the image). For these created tasks, we report the binary classification accuracy ( e.g. , for ITM if the caption describes the image or not). We stick to the accuracy as we sample balanced context demonstrations.  \n\nSugarCREPE ( Hsieh et al. ,2023 ). Is a benchmark to remedy the previous hackable datasets, by reducing the biases and shortcuts that can be exploited when evaluating compositionality. This is mainly due to using LLMs instead of rule-based templates to create hard negative examples. It covers 7 types of hard negatives; replace (objects, attributes, and relations), swap (objects and attributes) and add (objects and attributes). Each image is associated with a positive description (image caption) and several hard negative descriptions.  \n\nVQA-X ( Park et al. ,2018 ) (explainability) is based on the VQA and VQAv2 datasets, and contains 32K question/answer pairs and 41K explanations annotated by humans. The explanations are intended to explain the ground truth answer for the question, based on the corresponding image. We use the test set of this benchmark (1.9K pairs and 5.9K explanations). 
To evaluate the explainability performance, we consider captioning metrics such as CIDEr that are based on the syntactic similarity between the generated explanations and ground truth ones (annotated by humans).  \n\nLlaVA ( Liu et al. ,2023b ) (instruction following) consists of synthetically generated instructions of images from the COCO dataset. The authors use GPT-4 ( OpenAI ,2023 ) to generate intricate instructions that can be categorized into 3 categories; 23K detailed descriptions, 77K complex questions, and 58K examples of conversations between humans and an AI agent. To generate the instruction, GPT-4 (text-only) is prompted with several handcrafted examples (ICL). To make it understand images, the image is transformed into a set of bounding boxes and captions, passed as a sequence of textual tokens to GPT-4. For each category, we sample randomly some examples from the dataset of the same category. GPT-4 is used to evaluate models quantitatively ( Liu et al. ,2023b ). Specifically, we ask text-only GPT-4 to evaluate the performance and give a an overall score. However, evaluation based on LLMs are biased and might contain some flaws.',
                         'paper_title': 'Beyond Task Performance: Evaluating and Reducing the Flaws of Large Multimodal Models with In-Context Learning',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 6,
                         'chunk_ext_id': 454845874331751480},
                     {
                         'chunk_content': '# Systematic Inequalities in Language Technology Performance across the World’s Languages\n\n# Damián Blasi\nAntonios Anastasopoulos George Mason University   \n\nHarvard University   \n\nGraham Neubig Carnegie Mellon University\n\n# Abstract\nNatural language processing (NLP) systems have become a central technology in communication, education, medicine, artificial intelligence, and many other domains of research and development. While the performance of NLP methods has grown enormously over the last decade, this progress has been restricted to a minuscule subset of the world’s guages. We introduce a framework for estimat${\\approx}6{,}500$ laning the global utility of language technologies as revealed in a comprehensive snapshot of recent publications in NLP. Our analyses involve the field at large, but also more in-depth studies on both user-facing technologies (machine translation, language understanding, question answering, text-to-speech synthesis) as well as foundational NLP tasks (dependency parsing, morphological inflection). In the process, we (1) quantify disparities in the current state of NLP research, (2) explore some of its associated societal and academic factors, and (3) produce tailored recommendations for evidencebased policy making aimed at promoting more global and equitable language technologies.',
                         'paper_title': "Systematic Inequalities in Language Technology Performance Across the World's Languages",
                         'source_name': 'ACL', 'year': '2022', 'chunk_id': 0,
                         'chunk_ext_id': 454918603794285674},
                     {
                         'chunk_content': '# 2. Related Work\n\n# 2.1. Vision Foundation Models\nThe past decade has witnessed significant development in foundation models within the field of computer vision. Starting with the pioneering AlexNet [ 73 ], a variety of convolutional neural networks (CNNs) have emerged, continuously refreshing the ImageNet benchmark [ 33 ,40 ,57 ,62 ,65 ,95 ,148 ,160 ]. In particular, the introduction of residual connections [ 57 ] effectively addressed the problem of vanishing gradients. This breakthrough led to an era of “big & deep” neural networks, signifying that, with adequate training and data, larger and deeper models can achieve better performance. In other words, scaling up matters.  \n\nIn recent years, ViT [ 42 ] has opened up new possibilities for network architectures in the computer vision field. ViT and its variants [ 15 ,25 ,37 ,46 ,94 ,117 ,144 ,145 ,178 ,179 ]have significantly increased their capacity and excelled in various important visual tasks. In the LLM era, these vision foundation models often connect with LLMs through some lightweight “glue” layers [ 80 ,92 ,187 ]. However, a gap exists as these models primarily derive from visual-only datasets like ImageNet [ 38 ] or JFT [ 173 ], or are aligned with the BERT series [ 39 ,70 ,93 ] using image-text pairs, lacking direct alignment with LLMs. Additionally, the prevalent vision models employed to connect with LLMs are still limited to around 1 billion parameters [ 46 ,67 ], which also constrains the performance of VLLMs.\n\n# 2.2. Large Language Models\nLarge language models (LLMs) have revolutionized the field of artificial intelligence, enabling natural language processing tasks that were previously thought exclusive to humans [ 110 ,138 ,153 ]. The emergence of GPT-3 [ 153 ]brought a significant leap in capabilities, particularly in fewshot and zero-shot learning, highlighting the immense potential of LLMs. 
This promise was further realized with the advancements of ChatGPT and GPT-4 [ 110 ]. The progress in the field has been further accelerated by the emergence of open-source LLMs, including the LLaMA series [ 138 ,139 ], Vicuna [ 184 ], InternLM [ 135 ], MOSS [ 132 ], ChatGLM [44 ], Qwen [ 4 ], Baichuan [ 6 ], and Falcon [ 114 ], among others [ 32 ,134 ,154 ]. However, in real scenarios, interactions are not limited to natural language. The vision modality can bring additional information, which means more possibilities. Therefore, exploring how to utilize the excellent capabilities of LLMs for multi-modal interactions is poised to become the next research trend.\n\n# 2.3. Vision Large Language Models\nRecent advancements have seen the creation of vision large language models (VLLMs) [ 3 ,23 ,75 ,79 ,82 ,88 ,131 ,156 ,165 ,168 ,175 ,177 ,180 ,181 ,188 ], which aim to enhance language models with the capability to process and interpret visual information. Flamingo [ 3 ] uses the visual and language inputs as prompts and shows remarkable few-shot performance for visual question answering. Subsequently, GPT-4 [ 110 ], LLaVA series [ 91 ,92 ,100 ] and MiniGPT-4 [187 ] have brought in visual instruction tuning, to improve the instruction-following ability of VLLMs. Concurrently, models such as VisionLLM [ 147 ], KOSMOS-2 [ 115 ], and Qwen-VL et al . [ 5 ,21 ,149 ] have improved VLLMs with visual grounding capabilities, facilitating tasks such as region description and localization. Many API-based methods [ 96 ,97 ,125 ,133 ,155 ,163 ,166 ] have also attempted to integrate vision APIs with LLMs for solving vision-centric tasks. Additionally, PaLM-E [ 43 ] and EmbodiedGPT [ 108 ]represent advanced efforts in adapting VLLMs for embodied applications, significantly expanding their potential applications. These works showcase that VLLMs have achieved significant breakthroughs. 
However, the progress of vision and vision-language foundation models, equally essential for VLLMs, has not kept pace.\n\n# 3. Proposed Method\n\n# 3.1. Overall Architecture\nAs depicted in Figure 3 , unlike traditional vision-only backbones [ 57 ,94 ,148 ] and dual-encoder models [ 67 ,117 ,130 ], the proposed InternVL is designed with a vision encoder InternViT-6B and a language middleware QLLaMA. Specifically, InternViT-6B is a vision transformer with 6 billion parameters, customized to achieve a favorable tradeoff between performance and efficiency. QLLaMA is a language middleware with 8 billion parameters, initialized with a multilingual-enhanced LLaMA [ 32 ]. It could provide robust multilingual representation for image-text contrastive learning, or serve as a bridge to connect the vision encoder and the off-the-shelf LLM decoder.  \n\nTo align the two large-scale components with substantial gaps in modalities and structures, we introduce a progressive alignment training strategy. The training strategy is conducted progressively, beginning with contrastive learning on large-scale noisy data, and gradually moving towards generative learning on exquisite and high-quality data. In this way, we ensure the effective organization and full utilization of web-scale image-text data from a variety of sources. Then, equipped with the aligned vision encoder and language middleware, our model functions like a Swiss Army knife. It boasts a flexible composition that can be adapted for a wide array of generic visual-linguistic tasks. These tasks range from visual perception and image/videotext retrieval to image captioning, visual question answering, and multi-modal dialogue, among others.  \n\n  \nFigure 3. The training strategy of the proposed InternVL model. It consists of three progressive stages, including vision-language contrastive training, vision-language generative training, and supervised fine-tuning. 
These stages effectively leverage public data from diverse sources, ranging from noisy image-text pairs on the web to high-quality caption, VQA, and multi-modal dialogue datasets.  \n\nTable 1. Architecture details of the InternViT-6B model.   \n\n\n<html><body><table><tr><td>name</td><td>width</td><td>depth</td><td>MLP</td><td>#heads</td><td>#param (M)</td></tr><tr><td>ViT-G[173]</td><td>1664</td><td>48</td><td>8192</td><td>16</td><td>1843</td></tr><tr><td>ViT-e[23]</td><td>1792</td><td>56</td><td>15360</td><td>16</td><td>3926</td></tr><tr><td>EVA-02-ViT-E[130]</td><td>1792</td><td>64</td><td>15360</td><td>16</td><td>4400</td></tr><tr><td>ViT-6.5B 3[128]</td><td>4096</td><td>32</td><td>16384</td><td>32</td><td>6440</td></tr><tr><td>ViT-22B 3[37]</td><td>6144</td><td>48</td><td>24576</td><td>48</td><td>21743</td></tr><tr><td>InternViT-6B (ours)</td><td>3200</td><td>48</td><td>12800</td><td>25</td><td>5903</td></tr></table></body></html>',
                         'paper_title': 'InternVL: Scaling Up Vision Foundation Models and Aligning for Generic Visual-Linguistic Tasks',
                         'source_name': 'CVPR', 'year': '2024', 'chunk_id': 2,
                         'chunk_ext_id': 454849292752316436},
                     {
                         'chunk_content': '# 1 Introduction\nIt would be no exaggeration to say that transformer-based large language models (LLMs) have revolutionized the field of natural language processing (NLP). Kicked off by the advances presented by the GPT-x models developed by OpenAI [24 ], these types of language models currently provide state-of-the-art performance in many of the standard NLP tasks. Although LLMs were originally developed mostly to do word sequence completion tasks, with no guarantees about the completion beyond its coherence, there have been increasing claims and anecdotal evidence that they have other emergent capabilities that are not normally associated with sequence completion. Indeed, the hints of such emergent capabilities has started a veritable land rush, with researchers probing (prompting) and studying LLM behavior almost as if they were artificial organisms (c.f. [ 17 ]). Of particular interest to us in this paper is the thread of efforts that aim to investigate (and showcase) reasoning abilities of LLMs–including commonsense reasoning [ 33 ,27 ,10 ], logical reasoning [31], and even ethical reasoning [16]. The macro-tenor of the drumbeat of these works has been suggesting that LLM’s are indeed capable of doing such kinds of reasoning [18, 35, 5].  \n\nOne type of reasoning task that has been well studied in the AI community is planning and sequential decision making. At its simplest, planning involves developing a course of actions (policy) which when executed takes the agent to a desired state of the world. Planning has generally been studied primarily as an inference on world and reward models–whether specified by humans or learned by the agent by interacting with its world. 
In this paper, we are interested in seeing what planning abilities, if any, LLMs may already have, given their high capacity functions (with  \n\nOn the Planning Abilities of Large Language Models  \n\nbillions of tunable parameters) trained on web-scale corpora. Specifically, we are interested in answering two broad questions:  \n\n•How good are LLMs by themselves in generating and validating simple plans in commonsense planning tasks (of the type that humans are generally quite good at)?   \n•How good are LLMs in being a source of heuristic guidance for other agents–either AI planners or human planners–in their planning tasks?  \n\nNotice that in theory it is possible for LLMs to be very effective as idea generators for humans in the loop in computersupported cooperative work scenarios, while themselves being very bad at generating plans that are guaranteed to be correct. This is especially likely because the chief power of LLMs comes from their pattern finding abilities than on first-principles simulations over world models. Compared to a planner that is guaranteed to be correct in a narrow set of domains, LLMs may likely be good at generating plausible (but not guaranteed to be correct) plan heuristics/suggestions in many more domains.  \n\nTo investigate these questions in a systematic rather than anecdotal manner, we start by developing a benchmark suite 2 based on the kinds of domains employed in the International Planning Competition [ 15 ]. The tasks in the benchmark suite are aimed to test a variety of plan generation and validation capabilities. To eliminate the subjective aspect of analysis that forms the core part of many earlier efforts on evaluating reasoning capabilities of LLMs, we automate the evaluation by leveraging models and tools from the automated planning community. While our primary interest is in plan generation, the test tasks themselves form a broad curriculum for evaluating LLM’s capabilities of reasoning about actions and change.  
\n\nThe evaluation itself is done in three modes. In the first “autonomous" mode, LLMs are used as stand alone, and we directly assess the quality and correctness of plans they generate. As we shall see, the results in the autonomous mode are pretty bleak. Only about $3\\%$ of the plans that LLMs generate are actually executable without errors and reach their goals. We will show that the choice of the specific LLM (we experimented with two versions of GPT3 [ 3 ,21 ] as well as BLOOM [ 28 ]), as well as fine tuning seems to have little effect on this dismal performance. We also give a human baseline by presenting these tasks to human subjects (through IRB-approved studies) and evaluating the quality and correctness of their plans. These results are substantially better than those of LLMs–confirming that LLMs can’t plan in autonomous mode.  \n\nIn the second “heuristic" mode, the plans produced by LLMs are given as input to an automated planner working off of a correct domain model to check how easy it is to “repair" the LLM plans to guarantee their correctness. Specifically we show that a well known automated planner called LPG [ 9 ], that uses local search to locate and remove flaws in a candidate plan to make it correct, is able to repair the LLM plans with relative ease.  \n\nIn the third “human-in-the-loop mode", the LLM plans are given to humans in the loop to see how it affects their ability to solve the bench mark tasks. The results here show modest improvements in the accuracy of the plans generated by humans when they start with LLM suggested plans.  \n\nBeyond our own initial studies, the goal of this work is to provide a systematic benchmark to evaluate the (evolving) planning capabilities of LLMs. To this end, we make the benchmark suite and the automated evaluation tools public to support further research.',
                         'paper_title': 'On the Planning Abilities of Large Language Models - A Critical Investigation.',
                         'source_name': 'NeurIPS', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845563927274714},
                     {
                         'chunk_content': '# 1 Introduction\nThe emergence of Large Language Models (LLMs) has marked a significant milestone in the field of AI, revolutionizing natural language processing and understanding [ 8 ,10 ,21 ,48 ,57 ,65 ,66 ]. These models, trained on vast text corpus datasets, possess rich world knowledge, making them excel in generating helpful and contextually relevant text. With the advancement of LLMs, Multimodal Large Language Models (MLLMs) have seen rapid improvements [ 1 ,12 ,35 ,41 ,48 ,63 ,86 ], which typically process the images using a pretrained visual encoder (e.g., vision transformer) and feed them to the LLM as token embeddings along with the text token embeddings. These models extend the capabilities of LLMs to engage in interesting conversations with image inputs, which enables various potential applications such as autonomous driving [ 14 ] and medical assistants [ 34 ].  \n\n  \nFig. 1: Illustration of pretraining bias during MLLM’s inference. Due to the difference in data scales between text-based pretraining and multimodal alignment, the MLLM is prone to generating contents that are frequently seen during its pretraining stage.  \n\nDespite the fascinating capabilities of state-of-the-art Multimodal Large Language Models (MLLMs), they exhibit a susceptibility to producing erroneous or hallucinatory responses that do not correspond to the input image. For instance, MLLMs often generate non-existent objects, incorrectly identify attributes such as shape or color, or provide inaccurate object counts. This issue renders MLLMs unreliable and impractical for real-world applications, particularly those with high stakes, such as autonomous driving systems [ 15 ] or medical assistants [ 34 ].  \n\nWe hypothesize one of the major causes of this phenomenon is the bias inherited from LLM’s pre-training stage. 
Inspired by recent research in jailbreaking of LLMs [ 17 ], we point out that MLLMs can be treated as mixture models, consisting of both distributions learned from the pretraining text corpus, as well as multi-modal alignment tuning. Specifically, the LLM undergoes an extensive pretraining stage with the large scale text corpus. Comparatively, the multimodal alignment stage in current SOTA MLLMs utilizes much fewer training samples and shorter training period. The gap between the training scales of the two phases inevitably makes the pretraining distribution dominate the generation of MLLM under certain scenarios, especially when the image is of lower quality or is not sufficiently trained during multi-modal alignment.  \n\nMotivated by the reasons above, we introduce a novel stand point to tackle the aforementioned problem. Our study draws an analogy between a blind person who, even after a cornea transplant, still instinctively prefers walking on tactile paving. We argue that the distribution bias of MLLM stemming from pretraining can be viewed as an inherited “preference” derived from past prevalent behavior. Conversely, generating responses based on image inputs represents a new “preference” that the model must adapt to. To effectively address the current challenges faced by MLLMs, we propose to use the preference learning techniques from reinforcement learning (RL) [ 11 ,87 ], which is the leading technique to adapt the model generation toward the goals of being preferred. The effectiveness of the preference learning has been showcased with its tremendous success in ChatGPT [ 48 ], Claude [ 2 ], and Gemini [ 23 ], and is known to be far more efficient than the SFT [ 55 ]. The primary goal of this paper is to extend these techniques to align the different modality of MLLMs. 
Specifically, the most standard and popular preference learning [ 2 ,49 ,66 ] consists of three steps:  \n\n–construct a preference dataset, which consists of a pair of samples and the preference signal indicating which one is more preferred; –model a reward function based on the preference dataset; –optimize the reward function using proximal policy optimization (PPO) [ 59 ].  \n\nWhile there are a diverse set of preference datasets in the LLMs, the preference learning in MLLMs is largely under-explored. To this end, our first contribution is an innovative strategy to obtain comparison pairs based on existing datasets with ground truth annotations. Specifically, we regard the existing datasets with ground truth annotations as positive responses, and generate negative responses by 1) Image-weakened prompting: we utilize distorted images as "weakened visual prompts" to elicit responses from the MLLM, revealing the inherent bias from pretraining. These responses contain a higher degree of erroneous patterns and align more closely with the pretraining distribution, while still being relevant to the image input. 2) LLM bias injection, we leverage the LLM component of the MLLM to directly modify the original responses using carefully designed prompts and few-shot examples, resulting in negative responses that exhibit similarities but differ in specific details from the original annotations. This collection of negative responses reveals a more pronounced bias towards the pretraining distribution, thereby exposing potential weaknesses and unreliability of the MLLM.  \n\nIn terms of algorithmic design, it is known that the PPO algorithm is unstable and sample-inefficient in aligning LLMs [ 9 ] and imposes a heavy burden on the GPU resources as it requires loading multiple (typically four) models at the same time [ 16 ,82 ]. 
In contrast, the recently proposed direct preference optimization (DPO) combines the reward modeling with the policy optimization into one step, and directly learns from the preference dataset (hence the name). The DPO algorithm has emerged as a promising alternative to RLHF due to its stability and competitive performance. Motivated by this, we propose a variant of DPO, referred to as the Bootstrapped Preference Optimization (BPO) ,to extend the techniques to the MLLMs, which can significantly boosts the model performance as evaluated by multiple popular visual understanding benchmarks, while reducing object hallucinations by a large margin. To summarize, we make the following contributions in this paper:  \n\n–Firstly, make take a novel view and formulate the multimodal alignment into preference learning task, where the pretraining bias and visual grounding are treated as the old and new preferences, respectively.   \n–Secondly, we introduce a novel approach to construct preference datasets automatically at scale. The collected negative samples effectively expose the pretraining bias of MLLM.   \n–Lastly, we demonstrate through empirical evidence that our approach effectively enhances the grounding of MLLM on image inputs and results in performance boost in multiple benchmarks.',
                         'paper_title': 'Strengthening Multimodal Large Language Model with Bootstrapped Preference Optimization',
                         'source_name': 'ECCV', 'year': '2024', 'chunk_id': 1,
                         'chunk_ext_id': 454846248727149222},
                     {
                         'chunk_content': '# 1 Introduction\nIn recent years, an astounding variety of text and NLP tasks have been accomplished by language models (LMs) ( Devlin et al. ,2019 ) — in essence, fitting continuous feature vectors to tokens and modeling smooth conditional distributions over thousands of token positions with multi-task objectives. The next generation of large LMs (LLMs) such as T5, GPT4 and Bard (Raffel et al. ,2020 ;OpenAI ,2023 ) developed protean capabilities, extending to mathematical and logical ability, based on prompting and in-context learning. Chain-ofthought (CoT) prompting has been a key enabler (Wei et al. ,2022 ;Feng et al. ,2023 ). LLMs can solve middle-school word problems and equations reasonably well. It has also acquired the ability to invoke specialized external tools such as Wolfram Alpha ( Wolfram ,2023 ;Schick et al. ,2023 ).  \n\nRecent advances in LLMs have arisen largely from cleverly-engineered, ever-growing training data, rather than any significant change in network structure, which remains relatively regular, but with rapidly increasing network size and number of parameters. One outcome of such giant monolithic LLMs is unsustainable levels of hardware and energy ( Dhar ,2020 ) to train them. Meanwhile, neurologists and brain scientists have known, via fMRI scans, inter alia , that cerebral functions are specialized and spatially localized ( Fedorenko and Varley ,2016 ;Mahowald et al. ,2023 ).  \n\nMany recent complex reasoning challenges thrown at LLMs have a two-level character – the input task needs to be decomposed into subtasks, then the subtasks need to be solved, and finally, subtask solutions have to be consolidated and combined to solve the input task. Existing approaches use the same LLM to both decompose and solve the task, sometimes in tangled and uninterpretable ways. 
Because the sharing of an LLM across these functions cannot be closely controlled, very large models are needed for this double ability (decompose and solve) to emerge.  \n\nStaying entirely inside the LLM regime, and avoiding the possibility of specialized tools, we ask a simple question – is it possible to offload the ability of problem decomposition to a dedicated, relatively smaller scale model , which is specialized and can act in synergy with any solver model of choice? To incorporate flexibility and better generalization, an immediate requirement of such a setup would be to enable a model-agnostic communication between the decomposer and the solver.  \n\n  \nFigure 1: Working example of DaSLaM on a mathematical reasoning question from the JEEBench dataset ( Arora et al. ,2023 ). In this example, the solver LM is text-davinci-003 . In step $\\textcircled{1}$ , the solver is prompted to answer the question ( blue textbox) and it fails to answer correctly ( red textbox). A problem decomposing LM generates subproblems ( violet textboxes) conditioned on the original question and the initial response of the solver in step $\\circled{2}$ . In step $\\circled{3}$ , the solver answers these subproblems iteratively and appends to the prompt. Finally, the original problem is appended to the prompt in step $\\circledast$ , and the solver answers it correctly ( green textbox).  \n\nOur contributions. To study this research question, we develop DaSLaM (Decomposition And Solution LAnguage Models), in which we separate the decomposer from the solver, as shown in Figure 1 . The solver LM can be conventionally trained or fine-tuned. In the illustration, when it answers a question incorrectly, the decomposer LM takes over to produce sub-questions. The partial solutions are appended and resubmitted to the solver LM, which solves the question correctly. 
The decomposer LM regards the solver as a black box, and uses reinforcement learning (RL) to become a specialized expert at decomposition, informed by the solver’s mistakes.  \n\nExtensive experiments with three reasoning datasets (MATH, AQuA, and JEEBench) show that the proposed specialization improves the performance of OpenAI GPT-3 text-davinci-003 to outperform GPT-3.5 and even begins to compete with GPT-4, outperforming other similar methods. DaSLaM boosts text-davinci-003 from an exact match accuracy of 41 .6 to 54 .5 in zero-shot regime, which is 3 .9 points higher than few-shot GPT-4. Similarly, on Physics problems from JEEBench dataset, DaSLaM -augmented text-davinci-003 scores only 0 .58 points short of GPT-4 while outperforming GPT-3.5. The decomposer LM in DaSLaM reduces decomposition errors, and generalizes well across diverse small-scale LMs. It is also more robust in the face of difficult datasets, where the solver gives near-random performance.  \n\nThese results support our founding hypothesis that heterogeneous functional specialization improves model efficiency and robustness of LLMs. A crucial findings from our experiments is that finetuning the decomposer is much more powerful choice than finetuning the solver. Moreover, a finetuned decomposer is largely superior compared to an orders of magnitude larger LLM prompted to act as a decomposer. Given the prohibitive cost of finetuning LLMs like GPT 3, 3.5, or 4, we hope this method would provide us a promising direction towards future development of task-expert models.',
                         'paper_title': 'Small Language Models Fine-tuned to Coordinate Larger Language Models Improve Complex Reasoning',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845705455426500},
                     {
                         'chunk_content': '# 2 Related Work\nThe Emergence of Large Language Models. In recent years, based on the large-scale text corpora [ 28 ,83 ,69 ,116 ,82 ,100 ], the field of Large Language Models (LLMs) has witnessed remarkable progress [ 69 ,8 ,56 ,70 ,106 ]. Prominent models such as ChatGPT [ 62 ] and GPT-4 [ 61 ] have demonstrated excellent performance across various tasks, showcasing their potential for semantic understanding, dialogue generation, programming, mathematical problem-solving, and more. However, there is a growing concern that these leading institutes are becoming increasingly conservative in sharing the technical details of their models and roadmaps. To catch up with the performance of ChatGPT, the open-source community has devoted substantial efforts [ 80 ,90 ,77 ,17 ,102 ,29 ,104 ]. For instance, Self-Instruct [ 90 ] introduced an iterative bootstrapping algorithm that leverages offthe-shelf LLMs and a seed set of manually-written instructions to expand the instruction collection. Alpaca [ 77 ] utilized the Self-Instruct technique to generate high-quality instruction-following data, which was then used to fine-tune the LLaMA [ 80 ] model. Vicuna [ 17 ] demonstrated that fine-tuning on user-shared ChatGPT conversations can spark dialog and improve instruction-following capabilities. Furthermore, there has been a focus on improving multilingual capabilities, particularly in Chinese, with LLMs like Chinese-Alpaca [ 21 ], GLM-130B [ 102 ], InternLM [ 78 ], MOSS [ 19 ], and others. These LLMs have shown excellent proficiency in learning world knowledge, which lays the groundwork for open-world understanding.  \n\nDatasets for Visual Recognition and Understanding. The dataset plays a critical role in the advancement of deep learning models, especially in the field of visual recognition and comprehension. 
Prior to the era of large-scale models, datasets are primarily closed-world or have limited data scale, including CIFAR-10/100 [ 44 ], ImageNet [ 23 ], and iNaturalist [ 84 ] for image classification, Pascal VOC [ 24 ], COCO [ 53 ], LVIS [ 31 ], OpenImages [ 45 ], ADE20K [ 109 ], and Cityscape [ 20 for visual location, as well as SBU [ 63 ], CC3M [ 75 ], CC12M [ 11 ], YFCC15M [ 79 ], and VQA [ 4 ], VQA 2.0 [ 30 ], ICDAR 2015 [ 40 ], SCUT-CTW1500 [ 101 ] for visual understanding. Additionally, datasets like Visual Genome [ 43 ] and Visual7W [ 115 ] integrate visual location and understanding, offering more comprehensive tasks to describe the visual world. However, these datasets have limited semantics and fail to encompass diverse scenarios in the open world, which hinders the generalization ability of models. To achieve open-world capability, CLIP [ 67 ] and ALIGN [ 37 ]propose training models using web-scale image-text pairs collected from the internet. Subsequent works, such as Laion-400M [ 73 ], Laion-5B [ 72 ], COYO-700M [ 9 ] and DataComp [ 27 ], have also been introduced for open-source research. However, these approaches only include descriptions or question-answering pairs corresponding to the entire image, resulting in models struggling to accurately recognize and understand specific objects at the instance level. Recently, Kirillov et al. introduced SA-1B [ 42 ], which provides open-world location information such as boxes and masks but still lacks semantic details. So existing datasets cannot meet the requirements of data scale, open-world location and semantics necessary for achieving visual AGI models, thus posing challenges in supporting human-like panoptic visual recognition and understanding.  \n\n  \nFigure 2: Semantic concepts and annotations in the AS-1B dataset . The semantic tags in AS-1B dataset encompass a wide range of concepts, from common objects to rare and fine-grained categories with attributes. 
Beyond brief semantic tags, detailed annotations, including visual-question-answering pairs and region captions are also provided.   \nTable 1: Comparison with popular vision and vision-language datasets . “#” denotes the number of something. We see that the proposed AS-1B dataset has a significantly larger data scale and diversity than prior region-level datasets.  \n\n<html><body><table><tr><td>Dataset</td><td>#Images</td><td>#Regions</td><td>#Concepts</td><td>#Tokens</td><td>Location</td><td>Semantic</td></tr><tr><td>Image-Level</td><td>15M</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>ImageNet-22K [23]</td><td>0.1M</td><td></td><td>22,000</td><td></td><td></td><td>Closed-Set Closed-Set</td></tr><tr><td>COCO Caption [15]</td><td></td><td></td><td></td><td>8.4M</td><td></td><td></td></tr><tr><td>SBU [63]</td><td>0.8M</td><td></td><td></td><td>14.6M</td><td></td><td>Open-World</td></tr><tr><td>CC12M [11]</td><td>12.4M</td><td>一</td><td></td><td>250.9M</td><td></td><td>Open-World</td></tr><tr><td>YFCC15M [38]</td><td>15M</td><td>一</td><td>一</td><td>1.0B</td><td></td><td>Open-World</td></tr><tr><td>COYO700M[9]</td><td>700M</td><td>一</td><td></td><td>15.0B</td><td></td><td>Open-World</td></tr><tr><td>Laion-5B [72]</td><td>5B</td><td></td><td></td><td>135.0B</td><td></td><td>Open-World</td></tr><tr><td>Class-Agnostic</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>SA-1B [42]</td><td>11M</td><td>1.1B</td><td></td><td></td><td>Open-World</td><td></td></tr><tr><td>Region-Level</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>COCO [53]</td><td>0.1M</td><td>0.9M</td><td>80</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>LVIS [31]</td><td>0.1M</td><td>1.5M</td><td>1,203</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>Objects365 
[74]</td><td>0.6M</td><td>10.1M</td><td>365</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>Open Images [45]</td><td>1.5M</td><td>14.8M</td><td>600</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>BigDetection [10]</td><td>3.5M</td><td>36.0M</td><td>600</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>V3Det [86]</td><td>0.2M</td><td>1.5M</td><td>13,029</td><td></td><td>Closed-Set</td><td>Closed-Set</td></tr><tr><td>Visual Genome[43]</td><td>0.1M</td><td>0.3M</td><td>18,136</td><td>51.2M</td><td>Open-World</td><td>Open-World</td></tr><tr><td>AS-1B (ours)</td><td>11M</td><td>1.2B</td><td>3.5M</td><td>132.2B</td><td>Open-World</td><td>Open-World</td></tr></table></body></html>  \n\nModels for Visual Recognition and Understanding. Significant advancements have been made in the field of visual recognition and understanding in recent years. Previous methods [ 33 ,39 ,16 ,113 ,14 ,41 ,93 ,46 ] mainly concentrate on the close-set recognition while recent works begin to focus on the open world understanding. Models trained with contrastive learning-based methods, including CLIP [ 67 ], ALIGN [ 37 ], EVA [ 26 ] and FLIP [ 52 ], are able to recognize and understand the open world semantics under an image-text matching framework while the lack of generation ability limits their applicability. To address this limitation, subsequent works, such as SimVLM [ 91 ], UniPerceiver [ 114 ], VL-BERT [ 7 ], VLMo [ 6 ], BEiT-3 [ 89 ], ALBEF [ 49 ], CoCa [ 98 ], as well as Flamingo [ 2 ], have incorporated generative training tasks. However, these models are trained from scratch and do not capitalize on the powerful perception capabilities of existing powerful vision foundation models for image, and Large Language Models for text, increasing the cost of developing new models. 
The recent progress of LLMs [ 61 ,62 ,68 ,69 ,8 ] initiates a new era, leading to the emergency of many LLM-based multimodal models [ 48 ,47 ,111 ,54 ,96 ,104 ,60 ,87 ,12 ] and interactive systems [ 94 ,55 ,76 ,110 ,50 ,112 ,95 ]. However, these works are only capable of recognizing the entire image, lacking the ability to comprehend specific regions within the image. Some concurrent methods, such as ChatSpot [ 107 ], Shikra [ 13 ], KOSMOS-2 [ 65 ], and GPT4RoI [ 105 ] begin to focus on location-aware understanding. However, without the support of large-scale instance-level visual understanding data, the generalization ability of these models is still limited. Besides, these models only support generative tasks, limiting their application to discriminative tasks, such as image-text retrieval and zero-shot object recognition. In this work, we propose a unified locationaware image-text foundation model, based on ViT$\\mathrm{g}$ [26 ] and Husky [ 55 ]. Our model supports both image-text matching and generation tasks, expanding its range of applications and contributing to the advancement of AGI models.',
                         'paper_title': 'The All-Seeing Project: Towards Panoptic Visual Recognition and Understanding of the Open World',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 2,
                         'chunk_ext_id': 454845841462036854},
                     {
                         'chunk_content': '# II. R ELATED WORK\nIn this section, we provide a literature review pertaining to Large Language Models (LLMs), Multi-modal Large Language Models (MLLMs), and LLMs for Sequential Recommendation. Our work draws inspiration from them for the fusion of LLMs and sequential recommendation systems.\n\n# A. Large Language Models\nLanguage modeling has been extensively scrutinized for language understanding and generation over the past years, thereby catalyzing the recent emergence of Language Models (LMs) [ 7 ], [ 9 ], [ 29 ]–[ 31 ]. Pretrained LMs built on the Transformer architecture, such as BERT [ 31 ] and T5 [ 30 ], have demonstrated profound versatility owing to their largescale training corpus. More recently, researchers have delved deeper into the scaling effect by augmenting the parameter and training corpus scale to an unprecedented magnitude — encompassing billions of parameters and trillions of training tokens [ 7 ]–[ 9 ], [ 27 ], [ 29 ], [ 32 ]. These LLMs manifest substantial performance enhancements and display unique capabilities, such as common sense reasoning and instruction following. Moreover, domain-specific LLMs, such as those in the domain of finance [ 33 ], medicine [ 34 ], and law [ 35 ], are constructed by integrating domain expertise with the commonsense knowledge inherent in general LLMs. These advancements inspire us to probe the potential of LLMs in the realm of recommendation systems.\n\n# B. Multi-Modal Large Language Models\nDespite their versatility and promising performance, LLMs are restricted to textual inputs. However, a vast reservoir of information and knowledge resides in other modalities, including vision, video, and audio. Consequently, researchers have proposed Multi-modal Large Language Models (MLLMs), with the objective of amalgamating the domain of text with other modalities [ 36 ], [ 37 ]. 
Recent MLLMs suggest that visual feature space can be harmoniously aligned with the input textual space [ 21 ], [ 22 ], [ 38 ], [ 39 ], thereby empowering them to perform language generation tasks conditioned on visual inputs. Beyond vision, researchers have incorporated other modalities, such as video [ 40 ] and audio [ 41 ], into LMs, enabling them to digest information and knowledge of other modalities. We draw inspiration from these revelations to devise LLaRA, which fuses multi-modal information to enhance the sequential recommendation.\n\n# C. LLMs for Sequential Recommendation\nSequential recommendation leverages patterns in user behavior data to predict users’ next interaction based on their historical engagement sequence. Prior studies have explored employing complex model architectures to better characterize user preference, including Recurrent Neural Networks (RNNs) [3 ], [ 42 ]–[ 45 ], Convolutional Neural Networks (CNNs) [ 4 ], [46 ], [ 47 ], and Attention mechanisms [ 5 ], [ 48 ]. Additional studies have proposed supplementary learning tasks to improve sequential recommendation performance, including causal inference [ 49 ]–[ 51 ], data augmentation [ 52 ]–[ 54 ], and robust learning [ 55 ].  \n\nWith the advent of LLMs, researchers pay increasing attention to exploring their potential for sequential recommendation. Not only the extensive world knowledge stored in LLMs could serve as a rich source of background information for items, but also the reasoning capabilities of LLMs are able to augment the next item prediction. Previous works on the integration of LLMs into recommendation, often referred to as LLM4Rec, fall into two main categories [ 56 ], [ 57 ]: (1) LLM as the recommender and (2) LLM as the enhancer:  \n\n•LLM as the recommender. It involves training from scratch [ 10 ], tuning [ 11 ]–[ 14 ], prompting [ 15 ], or in context learning (ICL) [ 16 ], [ 17 ] an LLM on recommendation data to serve as a recommender. 
While studies within this category have substantiated that LLMs can be imbued with recommendation capabilities, they forfeit leveraging established yet effective recommendation models, resulting in inferior performance. •LLM as the enhancer. It augments traditional recommenders with LLM tokens or embeddings [ 19 ], [ 28 ], [58 ]. It typically utilizes LLMs as feature extractors or text generators, given their exceptional ability to integrate diverse sources and forms of information, such as item metadata. Nonetheless, the actual recommendation process is still done by conventional models, leaving the LLM’s reasoning skills untouched.  \n\nTo the best of our knowledge, LLaRA is a pioneering work that aligns traditional sequential recommendation models with LLMs. We not only capitalize on the sequential behavioral patterns learned by the well-trained traditional sequential models, but also utilize the reasoning ability and background knowledge inherent in the LLM.\n\n# III. P RELIMINARY\nTask Formulation. Sequential recommendation aims to predict the next item that aligns with a user’s interests, based on his/her historical interaction sequence [ 1 ], [ 2 ]. Formally, given a user who has chronologically engaged with item sequence $[i_{1},i_{2},...,i_{n}]$ , a sequential recommender entails predicting the next item $i_{n+1}$ this user will interact with.  \n\nCurriculum Learning. Curriculum learning [ 24 ] draws inspiration from the pedagogical strategies employed in human education, emphasizing training the model from simple to more challenging learning tasks. In general, curriculum learning contains three key stages [ 25 ]:  \n\n1) Complexity Assessment: Curriculum learning initially quantifies the complexity of each data point or task, which is then used to assign a learning priority. 
This step is crucial as it determines the order of tasks used for model training, and simpler tasks are typically accomplished before more challenging tasks are engaged in the learning process.   \n2) Scheduler Formulation: Based on the complexity assessment, a training scheduler can be devised to dictate the sequence and frequency of tasks that the model will be exposed to during the learning process.   \n3) Training Execution: Having devised the training scheduler, we can implement the curriculum learning process adhering to the predetermined progression.  \n\nInstruction Tuning. To enhance the capability of LLMs to better understand and respond to instructions for specific tasks, instruction tuning emerges as a pivotal approach that can substantially boost LLMs to follow human instructions. In technique, instruction tuning first requires structuring the data into the format of $(x,y)$ , where $x$ and $y$ denote the textual description of human instructions and the corresponding responses respectively. This input-output format not only encapsulates the task descriptions but also necessitates the transformation of training data into natural language, thus creating a comprehensive and coherent instructional context. Subsequently, we can tune the LLMs with $(x,y)$ following the autoregressive objective [ 6 ], [ 7 ], [ 29 ]:  \n\nDespite this reduction, it still manages to maintain acceptable performance levels. This approach effectively reduces both computational demands and processing time, making it a more efficient alternative for fine-tuning LLMs.  \n\nLoRA [ 63 ] is a typical PEFT algorithm, freezing the pretrained weights and injecting new trainable parameters. 
The optimizing objective of LoRA can be formulated as follows:  \n\n$$\n\\underset{\\Theta}{\\operatorname*{max}}\\sum_{(x,y)\\in\\mathcal{Z}}\\sum_{t=1}^{|y|}\\log\\left(P_{\\Phi_{0}+\\Delta\\Phi(\\Theta)}(y_{t}|x,y_{<t})\\right),\n$$  \n\nwhere LoRA introduces parameters $\\Theta$ , which are smaller in size in comparison to the original LLM parameters $\\Phi_{0}$ , and updates $\\Theta$ instead of $\\Phi$ .',
                         'paper_title': 'LLaRA: Large Language-Recommendation Assistant', 'source_name': 'SIGIR',
                         'year': '2024', 'chunk_id': 2, 'chunk_ext_id': 454846583944596150}],
                 content="""In this chapter, we will delve into the rise of Large Language Models (LLMs). As mentioned earlier, the previous chapter introduced the challenges faced by LLMs in terms of planning capabilities and the importance of enhancing these capabilities. According to the literature, LLMs have expanded language understanding and generation capabilities, revolutionizing natural language processing and understanding <sup>1</sup>. Furthermore, recent works have shown capabilities in instruction-following tasks and conversational applications after instruction tuning <sup>1</sup>. Studies show that LLMs have shown remarkable progress in various tasks, including semantic understanding, dialogue generation, programming, and mathematical problem-solving <sup>8</sup>. In summary, the rise of LLMs has brought about significant advancements in natural language processing and understanding, with promising potential for further development and application.""")
    s3 = Section(section_identifier="1_2", title="LLMs在规划能力方面的挑战",
                 description='分析了LLMs在规划能力方面面临的挑战，例如缺乏内部世界模型、难以进行多步推理和决策等。',
                 reference=[
                     {
                         'chunk_content': '# 4 RELATED WORK\nLimitations of multimodal models. Efforts have been made to address object hallucinations (Rohrbach et al. ,2018 ) by designing better training objectives ( Dai et al. ,2023b ), incorporating object labels as input ( Biten et al. ,2022 ) or costly multi-turn reasoning ( Xu et al. ,2023 ). To abstain from answering, recent work has attempted to tackle this problem by training selection functions on top of a VQA model ( Whitehead et al. ,2022 ;Dancette et al. ,2023 ). The challenge of compositionality has received significant attention, and multiple evaluation benchmarks have been proposed ( Ma et al. ,2023 ;Thrush et al. ,2022 ;Zhao et al. ,2022 ). Some solutions involve training on hard negative examples ( Yuksekgonul et al. ,2022 ) or employing improved architectures ( Ray et al. ,2023 ). The issue of explainability has been tackled in various ways, such as training auxiliary models to provide explanations ( Kayser et al. ,2021 ;Marasovi´c et al. ,2020 ;Wu & Mooney ,2019 ), or training models that generate both answers and explanations ( Sammani et al. ,2022a ). Furthermore, multimodal models also struggle to follow complex user instructions, as shown in recent work ( Liu et al. ,2023b ;Shukor et al. ,2023b ). To address this, previous work fine-tune models on instruction tuning datasets (Liu et al. ,2023b ;Xu et al. ,2022 ;Dai et al. ,2023a ;Li et al. ,2023a ;Zhu et al. ,2023a ). However, current approaches to address these limitations are focused mostly on small specialized multimodal models, and based on expensive finetuning; our ICL solutions are easier and cheaper.  \n\nEvaluation of LMMs. To achieve a more nuanced evaluation of different model abilities, concurrent works have proposed several benchmarks ( Xu et al. ,2023 ;Li et al. ,2023b ;Yu et al. ,2023 ;Liu et al. ,2023c ;Yin et al. ,2023 ). These works span evaluating multimodal models on modality comprehension ( Li et al. 
,2023b ), different capabilities ( $\\mathrm{Xu}$ et al. ,2023 ) fine-grained tasks ( Liu et al. ,2023c ), complicated tasks ( Yu et al. ,2023 ) or high-level 3D tasks ( Yin et al. ,2023 ). However, these benchmarks remain focused on task performance, with novelty in creating more fine-grained tasks. Besides, we differ from these benchmarks, as we consider different LMMs with ICL ability, and focus more on limitations/alignment in the context of ICL. In general, there is still a notable lack of work evaluating the limitations of LMMs.\n\n# 5 DISCUSSION\nReproducibility statement. Each experiment is repeated 3 times with different context demonstrations. We use public datasets and official open-source implementations provided by respective authors. We release the code and detailed technical instructions to reproduce the results (Appendix D).  \n\nLimitations. The work has some limitations, further discussed in Appendix Jand Appendix A , such as the limited range of abilities that we evaluate and the limited effectiveness of ICL as a partial solution for the studied flaws and models.  \n\nConclusion. We evaluate the limitations of recent LMMs on different axes; object hallucination, answer abstention, compositionality, explainability and instruction following. Despite their scale, we find that LMMs still struggle on most of these axes. Besides, we study how ICL can affect these limitations, and find that while it might help on some abilities ( e.g. , abstention and explainability and instruction following) it can amplify the flaws of LMMs ( e.g. , hallucination) or has almost no effect at all ( e.g. , compositionality). We also propose simple ICL variants that help reducing some of the flaws. Yet, we find that the improvements coming from ICL are limited, and more complex ICL variants or other strategies, such as RLHF might be required. 
Finally, we hope this provides more insights about the limitations of current LMMs, and offer promising directions towards efficiently aligning foundation models ( Lin et al. ,2023 ;Li et al. ,2023e ) to human preferences and expectations.  \n\nAcknowledgments This work was partly supported by ANR grant VISA DEEP (ANR-20- CHIA-0022), and HPC resources of IDRIS under the allocation 2022-[AD011013415] and 2023- [AD011013415R1] made by GENCI. We thank Hugo Laurencon for fruitful discussions.\n\n\n\n# Supplementary material\nThis supplementary material is organized as follows:  \n\n• Appendix A : discussion about the work and future directions.   \n• Appendix Bextends our related work section.   \n• Appendix C: background on LMMs and multimodal ICL.   \n• Appendix D: more implementation details about the evaluation setup.   \n• Appendix E: details about the different datasets and benchmarks that we use.   \n• Appendix F: additional evaluation results.   \n• Appendix Hprovides additional details and results with CoH-ICL, SC-ICL and MT-ICL.   \n• Appendix I : we investigate if adding task instructions can help ICL.   \n• Appendix J: we discuss the limitations of the work.\n\n# A DISCUSSION\nOther limitations and evaluation axes. The work does not consider all existing limitations. For instance, other kinds of hallucinations, beyond objects ( e.g. , relations, actions, attributes). For answer abstention, we consider the case when the question is not relevant to the image, but not for example when the question is relevant but unanswerable, or when it requires external knowledge that the model does not know. Other important axes include evaluating the reasoning ability of these models, especially in real situations ( e.g. , embodiment) and to which extent the model prediction is grounded in the real world.  \n\nICL as a way to address foundation model limitations. Despite being effective in some benchmarks, ICL is still limited in addressing some flaws. 
The different variants that we propose bring additional improvements. However, more effort should be put into devising more effective variants to obtain reasonable performance. In addition, we noticed that the design of the prompt affects the results, thus more prompt engineering work can help to get additional improvement. The importance of such training-free, post-hoc approaches is, in addition to being efficient, they can be complementary to other training-based ones, such RLHF ( Christiano et al. ,2017 ;Bai et al. ,2022a ) and RLAIF ( Bai et al. ,2022b ). Finally, more effort should be put into understanding why and when ICL works, to help develop better approaches.  \n\nOther LMMs and foundation models. The work addresses one kind of LMMs that are based on the Flamingo architecture. We choose these models, as they obtain the best performance on several multimodal benchmarks, they are open source and exist with different scales. The work can straightforwardly be extended to other multimodal models that have ICL abilities. For the broader family of multimodal models, especially the instruction-tuned ones, we believe that these models are also flawed, and it is important to quantitatively assess their limitations. Besides LMMs, the proposed ICL variants might be also effective in tackling the limitations of LLMs, which have received great attention in recent years.  \n\nBeyond 9B parameters. In this work, we only consider models up to 9B parameters. The effectiveness of ICL is limited on some benchmarks probably due to the model size. In fact, the ICL performance of OF models is not very stable as shown in the original paper ( Awadalla et al. ,2023 )(e.g. , sometimes increasing the number of shots decreases the performance on VQA). Thus, it will be interesting to evaluate larger and more powerful models. 
In addition, as ICL becomes more effective with larger models, X-ICL approaches must be also the case, especially on benchmarks where we noticed positive correlations between scaling and performance. On harder problems such as compositionality, or hallucinations it is uncertain if ICL will become more effective.  \n\nBeyond image-text modalities. While this work addresses image-text models, we argue that similar limitations also exist in models trained on other modalities. We believe the extension of this work, especially the ICL part, is straightforward to models tackling other modalities ( e.g. , videos-text or audio-text) and have ICL abilities. In fact, we argue that most of the findings on image-text models also hold on other modalities, which is supported by recent works ( Shukor et al. ,2023a ;Girdhar et al. ,2023 ;Shukor et al. ,2023b ;Zhang et al. ,2023 ) demonstrating the feasibility of extending image-text models or using almost the same image-text techniques to address other modalities.  \n\nPerformance saturation after large number of ICL demonstrations. In our study, we notice that the performance start to saturate after large number of shots (16/32) on most of the benchmarks. This issue can be seen in several previous work, in particular, the original work of OpenFlamingo (Awadalla et al. ,2023 ) and IDEFICS ( Lauren c¸ on et al. ,2023 ). For example, in ( Awadalla et al. ,2023 ); the VQA accuracy saturates or even degrades after 4/8 shots. Similarly for IDEFICS, but slightly better. There is multiple possible reasons for why multimodal ICL is not as effective as in LLMs, such as: (a) the multimodal datasets are still an order of magnitude smaller than those for LLMs. In addition, the web documents used to train such models do not contain many interleaved image-text pairs (a lot less than 32), which might hinder the ability of the model to generalize to larger number of in-context demonstrations during test. 
b) The trainable parameters during pretraining, are relatively small (¡15B), and acquiring better ICL ability might require training more parameters for more iterations. Finally, we would like to highlight the lack of in depth analysis of ICL in the context of LMMs, which we keep for future work.',
                         'paper_title': 'Beyond Task Performance: Evaluating and Reducing the Flaws of Large Multimodal Models with In-Context Learning',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 5,
                         'chunk_ext_id': 454845874307634230},
                     {
                         'chunk_content': '# 1 Introduction\nIt would be no exaggeration to say that transformer-based large language models (LLMs) have revolutionized the field of natural language processing (NLP). Kicked off by the advances presented by the GPT-x models developed by OpenAI [24 ], these types of language models currently provide state-of-the-art performance in many of the standard NLP tasks. Although LLMs were originally developed mostly to do word sequence completion tasks, with no guarantees about the completion beyond its coherence, there have been increasing claims and anecdotal evidence that they have other emergent capabilities that are not normally associated with sequence completion. Indeed, the hints of such emergent capabilities has started a veritable land rush, with researchers probing (prompting) and studying LLM behavior almost as if they were artificial organisms (c.f. [ 17 ]). Of particular interest to us in this paper is the thread of efforts that aim to investigate (and showcase) reasoning abilities of LLMs–including commonsense reasoning [ 33 ,27 ,10 ], logical reasoning [31], and even ethical reasoning [16]. The macro-tenor of the drumbeat of these works has been suggesting that LLM’s are indeed capable of doing such kinds of reasoning [18, 35, 5].  \n\nOne type of reasoning task that has been well studied in the AI community is planning and sequential decision making. At its simplest, planning involves developing a course of actions (policy) which when executed takes the agent to a desired state of the world. Planning has generally been studied primarily as an inference on world and reward models–whether specified by humans or learned by the agent by interacting with its world. 
In this paper, we are interested in seeing what planning abilities, if any, LLMs may already have, given their high capacity functions (with  \n\nOn the Planning Abilities of Large Language Models  \n\nbillions of tunable parameters) trained on web-scale corpora. Specifically, we are interested in answering two broad questions:  \n\n•How good are LLMs by themselves in generating and validating simple plans in commonsense planning tasks (of the type that humans are generally quite good at)?   \n•How good are LLMs in being a source of heuristic guidance for other agents–either AI planners or human planners–in their planning tasks?  \n\nNotice that in theory it is possible for LLMs to be very effective as idea generators for humans in the loop in computersupported cooperative work scenarios, while themselves being very bad at generating plans that are guaranteed to be correct. This is especially likely because the chief power of LLMs comes from their pattern finding abilities than on first-principles simulations over world models. Compared to a planner that is guaranteed to be correct in a narrow set of domains, LLMs may likely be good at generating plausible (but not guaranteed to be correct) plan heuristics/suggestions in many more domains.  \n\nTo investigate these questions in a systematic rather than anecdotal manner, we start by developing a benchmark suite 2 based on the kinds of domains employed in the International Planning Competition [ 15 ]. The tasks in the benchmark suite are aimed to test a variety of plan generation and validation capabilities. To eliminate the subjective aspect of analysis that forms the core part of many earlier efforts on evaluating reasoning capabilities of LLMs, we automate the evaluation by leveraging models and tools from the automated planning community. While our primary interest is in plan generation, the test tasks themselves form a broad curriculum for evaluating LLM’s capabilities of reasoning about actions and change.  
\n\nThe evaluation itself is done in three modes. In the first “autonomous" mode, LLMs are used as stand alone, and we directly assess the quality and correctness of plans they generate. As we shall see, the results in the autonomous mode are pretty bleak. Only about $3\\%$ of the plans that LLMs generate are actually executable without errors and reach their goals. We will show that the choice of the specific LLM (we experimented with two versions of GPT3 [ 3 ,21 ] as well as BLOOM [ 28 ]), as well as fine tuning seems to have little effect on this dismal performance. We also give a human baseline by presenting these tasks to human subjects (through IRB-approved studies) and evaluating the quality and correctness of their plans. These results are substantially better than those of LLMs–confirming that LLMs can’t plan in autonomous mode.  \n\nIn the second “heuristic" mode, the plans produced by LLMs are given as input to an automated planner working off of a correct domain model to check how easy it is to “repair" the LLM plans to guarantee their correctness. Specifically we show that a well known automated planner called LPG [ 9 ], that uses local search to locate and remove flaws in a candidate plan to make it correct, is able to repair the LLM plans with relative ease.  \n\nIn the third “human-in-the-loop mode", the LLM plans are given to humans in the loop to see how it affects their ability to solve the bench mark tasks. The results here show modest improvements in the accuracy of the plans generated by humans when they start with LLM suggested plans.  \n\nBeyond our own initial studies, the goal of this work is to provide a systematic benchmark to evaluate the (evolving) planning capabilities of LLMs. To this end, we make the benchmark suite and the automated evaluation tools public to support further research.',
                         'paper_title': 'On the Planning Abilities of Large Language Models - A Critical Investigation.',
                         'source_name': 'NeurIPS', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845563927274714},
                     {
                         'chunk_content': '# Acknowledgments and Disclosure of Funding\nSarath Sreedharan’s research is supported in part by grant NSF 2303019.\n\n\n\n# A Limitations\nThe proposed method has several limitations that need to be acknowledged. Firstly, its effectiveness is inherently limited by the capabilities of the LLMs it uses. As of the writing of the paper, LLMs have a number of known limitations that could prevent them from identifying the most likely models. Some of the issues include hallucination, lack of knowledge about specialized domains, the fact that it is an unsound reasoner, and so on (cf. [ 70 ,6 ]). Secondly, it is currently hard to make the prediction generation more specific to a task or a user. This arises from various challenges, including practical limitations on fine-tuning the model to these specific settings or the inability to include all the relevant information in the prompt due to limitations in prompt size and context windows.  \n\nIn Section 1.1, we noted the various flavors of model space problems in AI planning. We also noted how some of them overlap – e.g. unsolvability, executability, and explanations in domain authoring tasks – and how some of them are contained in others as a strict subset – e.g. explanations and lies. In evaluating the proposed method, we only focused on two prominent use cases, namely unsolvability, and executability. Additionally, we only considered a specific type of model update namely adding new predicates into the initial state. While theoretically, this model update can subsume any other model change, the ability of LLMs to identify likely model updates could differ based on the type of model updates considered.  \n\nFurthermore, the current study is limited to a set of domains where the reasonable or most likely changes were determined by the authors. We limited testing to a few LLMs and only considered two of the four possible configurations. 
It is also worth noting that effective solutions for model space search may involve additional challenges that are not being evaluated here. For example, domain authoring tasks also involve a human in the loop, which introduces additional dimensions of study beyond just figuring out which model edits are more likely – such as in figuring out how to communicate those edits effectively to the domain author. Such considerations are out of the scope of this paper. Similarly, the bastardized explainability problem that is able to generate lies, or conversely, a likely models approach that can actually catch those lies also have additional dimensions of interest, such as in mental modeling and computational ethics, which is also out of the scope of this work. We hope that this initial foray into this topic opens up future works in these directions.  \n\nIn the future, we hope to address many of the limitations of the current evaluation. This includes expanding the number of use cases studied, considering various model updates, and comparing between all the possible configurations. We will also look at the possibility of testing these methods in tasks, where we can correctly quantify the likelihood of these models. For unsolvability, this might involve focusing on scenarios where the cost of various actions possible in that setting is can be at least quantified accurately. For use cases such as domain authoring, this might correspond to cases where the ground truth is known and as such one can correctly determine what the missing or incorrect model components could be. 
We also hope to run user studies to evaluate the model updates generated by the method.\n\n# BModel Space Problems versus Other Meta-Reasoning Tasks in AI Planning\nWe include here some additional pointers to relevant works that either explore the evolving role of language models in planning or address other meta-reasoning tasks for planning.\n\n# Meta-Reasoning for Planning Tasks\nReasoning about a planning model rather than using that model as immutable input to plan with, can be viewed as a form of meta-reasoning. Indeed, there is a long history of work on meta-reasoning for planning tasks. However, these primarily involve a trade-off of the time taken to arrive at a solution versus the quality of the solution. Typically, in this setting, a planner can choose to stop looking for better solutions, and potentially settle for a suboptimal solution, if it believes that there is (computationally) no point in carrying on. Such approaches have been used for policy optimization in Markov Decision Processes [ 40 ], motion planning [ 66 ], planning in temporal domains [ 19 ], heuristic search [ 67 ], and so on. However, this thread of work does not aim to change the model itself to better suit a given criterion, and that is our aim.\n\n# Model Space to State Space Compilations in Human-Aware Planning\nOne meta-reasoning task that looks to change the model is “human-aware planning” – this is explicitly formulated as a planning task of finding a plan [ 10 ], and potentially some directive with it, given a basis model and the mental model of the human(s) in the loop. In this paradigm, the directive accompanying the plan may be an update to the mental model (i.e. an explanation of the plan). In contrast to the traditional meta-reasoning approaches that trade-off computation time with solution quality, the reasoning task in human-aware planning trades off the solution quality in the basis model with how it will be perceived in the mental model [15].  
\n\nAt this point, we want to make it clear that even though, conceptually, the model space reasoning problems described in this paper are looking for solutions (new models) in the space of models, and classical planning tasks are looking for solutions (plans) in the space of plans, these are not technically equivalent to plan-space and state-space search approaches used in planning [ 26 ]. Indeed, if the reasoning task is compiled to be represented by a state-space representation, then both plans and models can be searched for in the space of states. The approach in [ 61 ] does exactly that for the explicability-explanations trade-off originally envisaged explicitly in model-space search in [ 15 ]. We do the same in our compilations for unsolvability and executability for LLM as a pre-processor, while for LLM as a Post Processor, we use the original model space search from [13].',
                         'paper_title': 'Can LLMs Fix Issues with Reasoning Models? Towards More Likely Models for AI Planning',
                         'source_name': 'AAAI', 'year': '2024', 'chunk_id': 6,
                         'chunk_ext_id': 454846793878173444},
                     {
                         'chunk_content': '# 7 Discussions\nOur study yields three primary insights. First, Large Language Models (LLMs) demonstrate substantial planning and collaboration capabilities within our task scenarios. With suitable promptengineering, teams of LLM-based agents perform comparably to state-of-the-art Multi-Agent Reinforcement Learning (MARL) algorithms. This finding is particularly noteworthy given that MARL agents receive extensive task-specific training with a centralized critic, while LLM-based agents operate in a fully decentralized manner and undertake tasks in a zero-shot setting. Despite prior research highlighting LLMs’ limitations in generating actionable plans and interacting with the world, they perform reasonably well when placed in a team and tasked to process actions step-by-step. Particularly, LLMs fine-tuned with Reinforcement Learning from Human Feedback demonstrate emergent social interaction skills in multi-agent collaborations, which might be similar to the collaborative and interactive settings in which human language is primarily learned and used ( Sap et al. ,2023 ).  \n\nSecond, LLMs still fall short of being optimal planners or team players due to systematic failures, such as neglecting long-horizon contexts and making inaccurate assumptions about the task state (a.k.a hallucination). These flaws significantly hinder team collaborations as they can rapidly disseminate misinformation via communication, leading to widespread false beliefs. We attempted to mitigate these issues by allowing LLM-based agents to maintain an explicit belief state about the world. Our findings suggest that modern LLMs can update the given belief descriptions based on their observations, hinting at the potential emergence of advanced cognitive skills such as world knowledge understanding and situation modeling. 
Moreover, belief state representations offer a structured framework that helps agents track key task-related information, leading to improved team performance.  \n\nFinally, our study indicates that the Theory of Mind (ToM) capabilities of LLMs are still limited, particularly when evaluated within interactive teamwork scenarios that involve dynamic belief states and intensive communication. For context, while 5-year-old children can perform second-order ToM inferences ( Miller ,2009 ), adults don’t consistently use this ability during communications due to the complexity and ambiguity of social interactions ( Keysar et al. ,2003 ). Thus, there’s considerable work ahead for LLMs to develop a functional ToM and interact naturally with humans. Our study represents a preliminary effort to devise novel evaluation methods for LLMs’ ToM that go beyond traditional tests such as the Sally-Anne test.\n\n# 8 Conclusions\nIn this study, we assessed the ability of recent large language models (LLMs) to conduct embodied interactions in a team task. Our results demonstrate that LLM-based agents can handle complex multi-agent collaborative tasks at a level comparable with the state-of-the-art reinforcement learning algorithm. We also observed evidence of emergent collaborative behaviors and high-order Theory of Mind capabilities among LLM-based agents. These findings confirm the potential intelligence of LLMs in formal reasoning, world knowledge, situation modeling and social interactions. Furthermore, we discussed two systematic failures that limit the performance of LLM-based agents and proposed a prompt-engineering method that mitigates these failures by incorporating an explicit belief state about world knowledge into the model input.\n\n# Limitations\nThis study represents an initial effort to understand machine intelligence in complex task scenarios. 
Several enhancements could improve the experimental setup and offer a more thorough evaluation of LLMs in multi-agent collaborations. First, we could incorporate additional LLMs besides OpenAI’s GPT models. As new models emerge with enhanced reasoning capabilities and larger input sizes, their performance in team tasks and ToM inference may also change. Second, the task environment is relatively simple with only five nodes and five bombs. We plan to scale up the environment and introduce more restrictions to test how LLM-based teams react to more challenging tasks. Lastly, the current team consists of three agents with homogeneous policies. It would be intriguing to evaluate how LLM-based agents perform in human-agent teams, especially from a humancentered perspective where issues like trust, transparency, and human-agent co-training can be addressed.  \n\nThe ToM capability evaluation method used in this study also has its limitations. Currently, human annotators, who have a global view of the task state and interaction history, generate the ground truth for ToM inference questions. However, this estimation is at best an approximation, assuming agents process information as a rational human would, which might be ambiguous in situations involving false beliefs or miscommunications. A potential alternative could be using each agent’s maintained belief state as the ground truth.  \n\nThe proposed belief state method could extend from introspective belief to first-order or even second-order beliefs. Currently, LLM-based agents maintain a belief state about their own world knowledge in text form. By extending this representation to include other agents’ world knowledge, we could equip LLM-based agents with an explicit first-order ToM model. 
Their ToM capability can be assessed by directly comparing one’s first-order belief with another’s introspective belief, rather than asking LLMs Sally-Anne style questions.\n\n# 9 Acknowledgements\nThis work was supported by DARPA award HR001120C0036 and AFOSR award FA9550-18- 1-0097.',
                         'paper_title': 'Theory of Mind for Multi-Agent Collaboration Via Large Language Models',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 5,
                         'chunk_ext_id': 454845672108609452},
                     {
                         'chunk_content': '# m’sm&m’s: A Benchmark to Evaluate Tool-Use for multi-step multi-modal Tasks\nZixian $\\mathrm{Ma^{1}}$ , Weikai Huang 1 , Jieyu Zhang 1 , Tanmay Gupta 2 , Ranjay Krishna 1 ,1 University of Washington, Allen Institute for AI  \n\nhttps://huggingface.co/datasets/zixianma/mms  \n\n  \nFigure 1. We present examples of query-plan pairs along with the execution results of the plans in m&m’s. Our benchmark contains a large quantity of diverse user queries involving three modalities (i.e. text, image, and audio) as well as human-verified plans that consist of 1 - 3 tools across three categories: multi-modal machine learning models (blue), public APIs (red) and image processing modules (yellow).\n\n# Abstract\nReal-world multi-modal problems are rarely solved by a single machine learning model, and often require multistep computational plans that involve stitching several models. Tool-augmented LLMs hold tremendous promise for automating the generation of such computational plans. However, the lack of standardized benchmarks for evaluating LLMs as planners for multi-step multi-modal tasks has prevented a systematic study of planner design decisions. Should LLMs generate a full plan in a single shot or step-by-step? Should they invoke tools directly with Python code or through structured data formats like JSON? Does feedback improve planning? To answer these questions and more, we introduce m&m’s: a benchmark containing $4K+$ multi-step multi-modal tasks involving 33 tools that include multi-modal models, (free) public APIs, and image processing modules. For each of these task queries, we provide automatically generated plans using this realistic toolset. We further provide a high-quality subset of 1,565 task plans that are human-verified and correctly executable. With m&m’s, we evaluate 6 popular LLMs with 2 planning strategies (multi-step vs. step-by-step planning), 2 plan formats (JSON vs. 
code), and 3 types of feedback (parsing/verification/execution). Finally, we summarize takeaways from our extensive experiments. Our dataset and code are available on HuggingFace 1 and Github 2 .\n\n# 1. Introduction\nPlanning agents—powered by large language models (LLMs)—are becoming increasingly proficient at decomposing user-specified tasks into a series of subtasks, where each subtask is executed by invoking tools. For example, if a user wants to learn about a new piece of clothing in an image, the LLM planner can create a plan with multiple steps: first, it invokes an image classification tool to identify the clothing as a “kimono”; once identified, it can issue a Wikipedia search query to explain the cultural relevance of the kimono (Figure 1 first row).  \n\nLLM planning agents typically consist of an LLM and a set of tools to plan over. Given an LLM and toolset, the design space of planning agents is extremely rich, involving many decisions such as planning strategy (e.g. generation of the whole plan vs one step of the plan at a time), forms of feedback (e.g. no feedback or parsing/verification/execution feedback), and plan format (e.g. JSON strings that specify tools and arguments vs free-form Python code).  \n\nUnfortunately, there is no existing planning benchmark that supports evaluation along this combinatorially rich design space with a realistic set of multimodal tools. Recent concurrent benchmarks such as ToolEmu and TaskBench [ 21 ,24 ] provide user queries and ground truth plans but lack support for realistic plan execution. For instance, TaskBench assumes that a list of tools is available for planning without providing actual implementation of the tools. TaskBench also does not instantiate the queries with actual inputs and uses placeholder input filenames like “example.png” that do not exist. ToolEmu likewise uses LLMs to emulate tool execution instead of providing tool implementations. 
The lack of actual implementations of tools and real execution feedback while planning makes the study of the design space elucidated above unrealistic at best, if not impossible.  \n\nMotivated by this dire need for a standardized benchmark for studying the design space of multi-step multimodal planning agents, we first propose the m&m’s benchmark. m&m’s contains $4{\\mathrm{K}}+$ realistic user tasks and automatically generated task plans. 1565 of these task plans are human-verified and executable with 33 curated tools consisting of multi-modal models, (free) public APIs, and image processing modules.  \n\nNext, we use m&m’s to systematically study the impact of 2 planning strategies (step-by-step and multi-step), 2 kinds of feedback (verification and execution), and 2 plan formats (JSON and code). Through extensive experimentation with 6 popular open-source (LLaMA-2-7B, LLaMA2-13B, Mixtral-8x7B) and proprietary LLMs (Gemini-pro, GPT-3.5, GPT-4) of varying sizes, we provide a series of findings: First, existing LLMs instructed to perform multistep planning consistently outperform step-by-step planning, regardless of the model size. This is in contrast to the popularity of ReAct-style [ 40 ] planning that is predominant today. Second, feedback improves LLM’s ability to predict the correct argument name for each tool and generate overall executable tool plans but doesn’t necessarily improve the ability to choose the right tools. Third, most models (except for Llama-2-7b) perform comparably on tool prediction with JSON-format generation and Python code generation, but they all produce more executable plans with JSON-format generation than with code generation. Taken together, our experiments suggest that for m&m’s tasks, multi-step planning in JSON with feedback can result in the best overall tool-use performance compared to step-by-step planning, code generation, or the same setup without feedback.',
                         'paper_title': "M M's: A Benchmark to Evaluate Tool-Use for Multi-Step Multi-Modal Tasks",
                         'source_name': 'ECCV', 'year': '2024', 'chunk_id': 0,
                         'chunk_ext_id': 454846261297216942},
                     {
                         'chunk_content': '# Reasoning with Language Model is Planning with World Model\nShibo $\\mathbf{Ha0}^{**}$ Yi $\\mathbf{Gu}^{**}$ Haodi $\\mathbf{M}\\mathbf{a}^{\\diamond}$ Joshua Jiahua Hong ♣Zhen Wang ♣♠ Daisy Zhe Wang ♢Zhiting $\\mathbf{H}\\mathbf{u}^{\\mathbf{*}}$  \n\n♣UC San Diego, ♢University of Florida   \n♠Mohamed bin Zayed University of Artificial Intelligence   \n{s5hao, yig025, jjhong, zhw085, zhh019}@ucsd.edu {ma.haodi, daisyw}@ufl.edu\n\n# Abstract\nLarge language models (LLMs) have shown remarkable reasoning capabilities, particularly with chain-of-thought (CoT) prompting. However, LLMs sometimes still struggle with problems that are easy for humans, such as generating action plans to achieve given goals in an environment, or performing complex math or logical reasoning. The deficiency stems from the key fact that LLMs lack an internal world model to predict the world state (e.g., environment status, intermediate variable values) and simulate long-term outcomes of actions. This prevents LLMs from performing deliberate planning akin to human brains, which involves exploring alternative reasoning paths, anticipating future states and rewards, and iteratively refining existing reasoning steps. To overcome the limitations, we propose a new LLM reasoning framework, Reasoning vi a Planning (RAP) . RAP repurposes the LLM as both a world model and a reasoning agent, and incorporates a principled planning algorithm based on Monte Carlo Tree Search for strategic exploration in the vast reasoning space. During reasoning, the LLM (as agent) incrementally builds a reasoning tree under the guidance of the LLM (as world model) and rewards, and efficiently obtains a high-reward reasoning path with a proper balance between exploration vs. exploitation. We apply RAP to various challenging reasoning problems including plan generation, math reasoning, and logical inference, and demonstrate its superiority over strong baselines. 
RAP with LLaMA-33B even surpasses CoT with GPT-4, achieving $33\\%$ relative improvement in a plan generation setting.\n\n# 1 Introduction\nLarge language models (LLMs) have exhibited emergent reasoning abilities in a wide range of tasks ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ;  \n\nOpenAI ,2023 ). Recent approaches further boost their ability by prompting LLMs to generate intermediate reasoning steps, e.g., Chain-of-Thought, CoT ( Wei et al. ,2022 ) or answer a series of subquestions, e.g., least-to-most prompting ( Zhou et al. ,2022 ). However, LLMs still face difficulties with tasks that humans find easy. For example, in creating action plans to move blocks to a target state, GPT-3 ( Brown et al. ,2020 ) achieves a success rate of only $1\\%$ , compared to $78\\%$ for humans ( Valmeekam et al. ,2022 ); these models also struggle with complex tasks that require multiple steps of math, logical, or commonsense reasoning ( Huang and Chang ,2022 ;Mialon et al. ,2023 ).  \n\nHumans possess an internal world model , a mental representation of the environment ( JohnsonLaird ,1983 ,2010 ;Gentner and Stevens ,2014 ), which enables humans to simulate actions and their effects on the world’s state for deliberate planning for complex tasks of motor control, imagery, inference, and decision making ( Tolman ,1948 ;Briscoe ,2011 ;Schulkin ,2012 ;LeCun ,2022 ). For example, to make an action plan towards a goal, planning with the world model involves exploring various alternative courses of actions, assessing the likely outcomes by rolling out possible future scenarios, and iteratively refining the plan based on the assessment ( Huys et al. ,2012 ;Gasparski and Orel ,2014 ;Ho et al. ,2021 ). This is in stark contrast to the current LLM reasoning, which instinctively generates a reasoning trace in an autoregressive manner. 
In particular, we identify several key limitations of the current reasoning with LLMs, including (1) the lack of an internal world model to simulate the state of the world (e.g., the configuration of blocks, the values of intermediate variables), which is the foundation of human planning; (2) the absence of a reward mechanism to assess and guide the reasoning towards the desired state; and due to both limitations, (3) the incapability of balancing exploration vs. exploitation to efficiently explore vast  \n\n  \nFigure 1: An overview of Reasoning via Planning (RAP). Compared with previous LLM reasoning methods like Chain-of-Thought ( Wei et al. ,2022 ), we explicitly model the world state from a world model (repurposed from the language model), and leverage advanced planning algorithms to solve the reasoning problems.  \n\nreasoning space.  \n\nTo address these limitations, this paper proposes a new framework, Reasoning via Planning (RAP) ,that enables LLMs to reason in a manner close to humans’ conscious planning. RAP augments the LLM with a world model, and reasons with principled planning (specifically Monte Carlo Tree Search, MCTS ) to produce high-reward reasoning traces after efficient exploration (Figure 1 ). Notably, we acquire the world model by repurposing the LLM itself with appropriate prompts. During the reasoning, the LLM strategically builds a reasoning tree by iteratively considering the most promising reasoning steps ( actions ) and using the world model (the same, repurposed LLM) to look ahead for future outcomes. The estimated future rewards are then backpropagated to update the LLM’s beliefs about the current reasoning steps, guiding it to refine the reasoning by exploring better alternatives. Our MCTS-based planning effectively maintains a proper balance between exploration (of unvisited reasoning traces) and exploitation (of the best reasoning steps identified so far).  
\n\nWe show RAP is a general framework applicable to a diverse range of challenging problems and achieves substantial improvements over recent popular LLM reasoning methods. For plan generation, particularly in 2/4/6-step problems of Blocksworld (Valmeekam et al. ,2023 ), RAP achieves an average success rate of $64\\%$ while CoT fails almost completely. Moreover, LLaMA-33B with RAP surpasses GPT-4 with CoT by $33\\%$ relative improvement. In the domains of mathematical reasoning, such as GSM8K ( Cobbe et al. ,2021 ) and logical inference exemplified by PrOntoQA ( Saparov and He ,2022 ), RAP also consistently improves over strong baselines, including CoT, least-to-most prompting, and their self-consistency variants.',
                         'paper_title': 'Reasoning with Language Model is Planning with World Model.',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 0,
                         'chunk_ext_id': 454845659291863696},
                     {
                         'chunk_content': '# 7 Conclusion and Future Work\nIn this paper, we presented a critical investigation of the planning abilities of large language models (LLMs). To this end, we first provided an extensible benchmark where researchers can evaluate current and future large language models. We evaluated the planning abilities of LLMs in three different modes. In the autonomous mode, our results show that even in simple common-sense planning domains where humans could easily come up with plans, current SOTA LLMs like GPT-3 and BLOOM exhibit a dismal performance. In the heuristic mode, we have seen that plans generated by LLMs can be quickly corrected by sound planners like LPG to guarantee soundness. Finally, in the human in the loop mode, we have seen that human planners are slightly better off with an LLM assisting them as having an LLM as a plan assistant showcased modest improvements in the accuracy of the plans generated by the human-in-the-loop.  \n\nWe look to improve our assessment suite in multiple ways in the future. We plan to include a modified version of the reasoning about plan execution task to ask questions that require a more descriptive answer and provide automated validations for the answers. This benchmark can be extended to other domains, either to common-sense domains (like Virtual Home [ 23 ]) or to specialized ones. We have also performed additional experiments including evaluating a version of GPT-3 fine-tuned on blocksworld instances, and evaluating LLMs on disguised blocksworld domains. These experiments are described in the Appendix. In conclusion, we hope that this benchmark 6 encourages other researchers to test the capabilities of their systems across different LLM models [ 5 ,8 ,30 ,37 ,25 ,34 ,12 ] and even those that are fine-tuned for such tasks.\n\n# 8 Acknowledgements\nKambhampati’s research is supported by the J.P. 
Morgan Faculty Research Award, ONR grants N00014-16-1-2892, N00014-18-1-2442, N00014-18-1-2840, N00014-9-1-2119, AFOSR grant FA9550-18-1-0067 and DARPA SAIL-ON grant W911NF19-2-0006. We also want to thank OpenAI and Miles Brundage for letting us get early research access to the GPT-3 API.\n\n\n\n# A Appendix\n\n# A.1 Additional Experiments\nIn the following sections, we will look at additional experiments that were done only on the test cases that correspond to actual planning problems (which are Plan Generation and Optimal Planning).\n\n# A.1.1 Fine tuning\nTable 2: Results of Plan Generation and Optimal Planning in the Blocksworld Domain on Finetuned-GPT3   \n\n\n<html><body><table><tr><td>Task</td><td>Instancescorrect Finetuned-GPT3</td></tr><tr><td>Plan Generation</td><td>82/500 (16.4%)</td></tr><tr><td>Optimal Planning</td><td>110/500 (22%)</td></tr></table></body></html>  \n\nAlong with testing GPT-3, Instruct-GPT3 and BLOOM, we have also looked at the utility of fine-tuning GPT-3 on the blocksworld domain. We prepared a dataset consisting of the initial state, goal state and the respective plan for 1000 blocksworld instances. These instances were different from our test set of 500 instances. We fine-tuned GPT-3 (davinci) on this dataset (using the default hyperparameters provided by Open-AI and 80-20 data split) and evaluated on the two test-cases which correspond to actual planning problems. Even though the results (in Table 2) showcase an uptick in the number of successful plans, the overall performance is still around $20\\%$ . 
This is unsurprising as [ 36 ] point out that language models tend to focus on the inherent statistical features in reasoning problems which affects their performance on such tasks.\n\n# A.1.2 Mystery blocksworld domain\n<html><body><table><tr><td rowspan="2">Task</td><td colspan="3">Instances correct</td></tr><tr><td>GPT-3</td><td>Instruct-GPT3</td><td>BLOOM</td></tr><tr><td>Plan Generation</td><td>0/600 (0%)</td><td>7/600 (1.1%)</td><td>0/50 (0%)</td></tr><tr><td>Optimal Planning</td><td>0/600 (0%)</td><td>8/600 (1.3%)</td><td>0/50 (0%)</td></tr></table></body></html>\n\nTable 3: Results of Plan Generation and Optimal Planning in the Mystery Blocksworld Domain with deceptive disguising on GPT-3, Instruct-GPT3 and BLOOM.  \n\n<html><body><table><tr><td rowspan="2">Task</td><td colspan="2">Instancescorrect</td></tr><tr><td>GPT-3</td><td>Instruct-GPT3</td></tr><tr><td>Plan Generation</td><td>1/600 (0.1%)</td><td>5/600 (0.8%)</td></tr><tr><td>Optimal Planning</td><td>3/600 (0.5%)</td><td>4/600 (0.6%)</td></tr></table></body></html>\n\nTable 4: Results of Plan Generation and Optimal Planning in the Mystery Blocksworld Domain with randomized disguising on GPT-3, Instruct-GPT3 and BLOOM.  \n\nAnother popular domain used in the planning literature is the Mystery domain created by Drew McDermott. In this domain, the goal is to get cargo items from one place to another with vehicles having fuel availability constraints. But the domain is disguised by changing the names of predicates and actions to unrelated entities. Testing LLMs on such a domain would give us insights into whether LLMs are infact performing abstract reasoning or if they are using any common-sense knowledge (such as the meaning of the action/predicate names) in coming up with plans. The domain can be disguised in two different ways, deceptive or randomized. 
Deceptive disguising would require using words that have meaning by themselves but are unrelated in terms of cause and effect, thereby deceiving the LLM. Randomized disguising would use random alpha-numeric names to disguise the domain. For better comparison of results, instead of the original mystery domain, we came up with a mystery domain which disguises the blocksworld domain on which the LLMs have already been evaluated. We used both deceptive and randomized disguising and have evaluated on Plan Generation and Optimal Planning test cases. For deceptive disguising, we have evaluated GPT-3, Instruct GPT-3 and BLOOM whereas for randomized disguising we have evaluated GPT-3 and Instruct GPT-3. The results (in Table 3 and Table 4) showcase a decrease in the number of successful plans generated by the LLMs in the mystery blocksworld as opposed to the original blocksworld. These results indicate that LLMs might not be reasoning at an abstract level and might be relying on the underlying meanings of the actions/predicates and the relations between them while coming up with plans.',
                         'paper_title': 'On the Planning Abilities of Large Language Models - A Critical Investigation.',
                         'source_name': 'NeurIPS', 'year': '2023', 'chunk_id': 7,
                         'chunk_ext_id': 454845564034229478},
                     {
                         'chunk_content': '# Towards More Likely Models for AI Planning\nTurgay Caglar Colorado State University   \n\nSirine Belhaj Ecole Polytechnique de Tunisie   \n\nTathagata Chakraborti IBM Research   \n\nMichael Katz IBM Research   \n\nSarath Sreedharan Colorado State University\n\n# Abstract\nThis is the first work to look at the application of large language models (LLMs) for the purpose of model space edits in automated planning tasks. To set the stage for this sangam, we explore two different flavors of model space problems that have been studied in the AI planning literature and explore the effect of an LLM on those tasks. We empirically demonstrate how the performance of an LLM contrasts with combinatorial search (CS) – an approach that has been traditionally used to solve model space tasks in planning, both with the LLM in the role of a standalone model space reasoner as well as in the role of a statistical signal in concert with the CS approach as part of a two-stage process. Our experiments show promising results suggesting further forays of LLMs into the exciting world of model space reasoning for planning tasks in the future.\n\n# 1 Introduction\nAI planning or automated planning (used interchangeably) is the task of synthesizing the goal-directed behavior of autonomous agents. Traditionally, the AI planning community has looked at the classical planning problem as one of generating a plan given a model of the world [ 26 ]. Here, “model” or a “planning problem” refers to a collection of constraints describing the current state of the world (initial state), the actions available to the agent along with the conditions under which the agent can do those actions and the effect of doing those actions on the environment, and a target (goal) state for the agent to achieve. The plan is a sequence of actions that the agent can use to transform the current state to the desired goal state.  
\n\nTypically, these models are represented using the planning domain definition language or PDDL [29 ,44 ] – we will use the same in this paper. All the information to derive this solution (plan) is contained in the input model which remains static during the planning task. But what if the model itself needs to be changed?  \n\nThis may be because it is incorrect, or incomplete, or even unsolvable. It may be because it needs to be changed to support some new behaviors. It may also be because the model is being used to describe a world that itself needs to change through the actions of an agent. In practice, the deployment of systems that can plan involves a whole gamut of challenges in authoring, maintaining, and meta-reasoning about models of planning tasks.  \n\n  \nFigure 1: A conceptual illustration of model space problems in AI planning. Instead of the classical That criterion in Figure 1a is that the initially unsolvable model becomes solvable (or an initially planning task of computing a plan given a model, a model space and a target criterion to satisfy, and the solution is a new model Mwhere that criterion is satisfied. starts with a starting model $\\mathcal{M}$ invalid plan in model is the mental model of the user that needs to be updated and the target is a new model that $\\mathcal{M}$ becomes valid in the new model $\\mathcal{M}_{1})$ ). In Figure 1b, on the other hand, the starting can explain a given plan (or refute a given foil). In domain authoring situations, such model updates happen with the domain writer in the loop, and the starting model is the model under construction (Figure 1c). In all these cases, there are many non-unique model edits required criterion. 
In this paper, we explore if LLMs can produce more likely edits in real-worldly $\\mathcal{M}_{1}\\Delta\\mathcal{M}$ that can satisfy the domains.\n\n# 1.1 Model Space Problems in AI Planning\nWe begin by enumerating different flavors of model space reasoning tasks explored in the AI planning literature. All of them involve a starting model which has something wrong with it and the solution is a new model where the problem has been resolved or the required criterion has been met.\n\n# Unsolvability\nPerhaps the most difficult of model space problems, especially with humans in the loop, is that of unsolvability. This is because when a model is unsolvable, there is no artifact (such as an outputted plan) to look at for debugging purposes. While there have been a lot of efforts, including an ongoing competition [ 49 ], to detect unsolvability of planning tasks up-front to speed up calls to a planning module [ 5 ,47 ], and attempts to compute or even learn heuristics [ 31 ,64 ,65 ] and produce certificates [21 ,22 ,20 ] for unsolvable tasks, to make this process as efficient as possible, these do not help to fix the issues with the model that make it unsolvable in the first place.  \n\nOne of the seminal works in this category [ 27 ] framed the problem as “excuse generation” where the authors envisaged a reformulation of the input planning task where if only (i.e. an excuse) certain things about the current state were changed then it would become solvable. In addition to initial state changes, this idea was later extended [ 30 ] to cover other parts of the model and framed as a more general “planning task revision” problem.  \n\nWhile these works do not particularly consider a human in the loop, authors in [ 62 ,60 ] have looked at the problem of explaining unsolvability of planning tasks to users explicitly as a model evolution problem, using techniques like domain abstractions (simplifications) to adjust to users with different levels of expertise. 
Later efforts [ 34 ] have borrowed from these concepts and tried to operationalize them for developers.',
                         'paper_title': 'Can LLMs Fix Issues with Reasoning Models? Towards More Likely Models for AI Planning',
                         'source_name': 'AAAI', 'year': '2024', 'chunk_id': 0,
                         'chunk_ext_id': 454846793634903800},
                     {
                         'chunk_content': '# ON THE PLANNING A BILITIES OF LARGE LANGUAGE MODELS (A C RITICAL I NVESTIGATION WITH A PROPOSED BENCHMARK )\n\n# Karthik Valmeekam\n\n# Sarath Sreedharan∗\nSchool of Computing & AI Arizona State University, Tempe.   \n\nMatthew Marquez School of Computing & AI Arizona State University, Tempe.   \n\nDepartment of Computer Science, Colorado State University, Fort Collins.   \n\nAlberto Olmo School of Computing & AI Arizona State University, Tempe.   \n\nSubbarao Kambhampati School of Computing & AI Arizona State University, Tempe.\n\n# A BSTRACT\nIntrigued by the claims of emergent reasoning capabilities in LLMs trained on general web corpora, in this paper, we set out to investigate their planning capabilities. We aim to evaluate (1) how good LLMs are by themselves in generating and validating simple plans in commonsense planning tasks (of the type that humans are generally quite good at) and (2) how good LLMs are in being a source of heuristic guidance for other agents–either AI planners or human planners–in their planning tasks. To investigate these questions in a systematic rather than anecdotal manner, we start by developing a benchmark suite based on the kinds of domains employed in the International Planning Competition. On this benchmark, we evaluate LLMs in three modes: autonomous, heuristic and human-in-the-loop .Our results show that LLM’s ability to autonomously generate executable plans is quite meager, averaging only about $3\\%$ success rate. The heuristic and human-in-the-loop modes show slightly more promise. In addition to these results, we also make our benchmark and evaluation tools available to support investigations by research community.',
                         'paper_title': 'On the Planning Abilities of Large Language Models - A Critical Investigation.',
                         'source_name': 'NeurIPS', 'year': '2023', 'chunk_id': 0,
                         'chunk_ext_id': 454845563909973208},
                     {
                         'chunk_content': '# 1 I NTRODUCTION\nThe quest for building generalist assistants has garnered significant attention and effort ( OpenAI ,2023 ;Gao et al. ,2023 ). The recent breakthroughs in Large Language Models (LLMs) ( Brown et al. ,2020 ;Chowdhery et al. ,2022 ;Touvron et al. ,2023b ) represent a promising initial step towards this goal, achieving near-human performance across numerous NLP tasks. However, their confinement to the single textual modality remains a significant limitation in developing universal models. Consequently, the focus has shifted to building multimodal models that transcend generation and understanding across text and images ( Huang et al. ,2023 ;Yu et al. ,2022 ;Wang et al. ,2022a ). The prevailing approach to develop Large Multimodal Models (LMMs), is to build on top of LLMs, bridging the gap between language and the other modalities. Those “augmented language models” ( Alayrac et al. ,2022 ;Mialon et al. ,2023 ;Shukor et al. ,2023a ) beat previous models ( Chen et al. ,2020 ;Li et al. ,2021 ;Dou et al. ,2021 ;Shukor et al. ,2022 ) on almost all benchmarks.  \n\nAlthough LMMs have achieved remarkable scores, measuring the task performance alone, such as their prediction accuracy on general benchmarks ( e.g. , VQA accuracy or CIDEr for captioning), is insufficient to assess their genuine capabilities. For example, performances on those tasks may artificially increase simply by exploiting dataset biases and shortcuts, without truly understanding and generalization ( Geirhos et al. ,2020 ;Dancette et al. ,2021 ;Du et al. ,2022 ). While evaluating LLMs (Chang et al. ,2023 ;Li et al. ,2023d ) and small multimodal models ( Ma et al. ,2023 ;Dai et al. ,2023b )has received attention, the evaluation of recent LMMs has been comparatively overlooked. This is becoming increasingly important as recent works ( Alayrac et al. ,2022 ;Shukor et al. 
,2023b ;a ), in preliminary investigations, have highlighted qualitatively several major flaws ( e.g. , hallucinations), showing that LMMs are still not aligned with the needs for deployment in real-world applications.  \n\n  \nFigure 1: Evaluation framework. We study LMMs following 3 strategies, on different axes; hallucinations, abstention, compositionality, explainability and instruction following. In addition to an image <image> and a question Tused in zero-shot (ZS), in-context learning (ICL) considers $N$ demonstrations of images-questionsanswers ( <image $>_{i}$ ,$\\mathrm{T}_{i},\\mathrm{R}_{i})$ as input $X$ , augmented by a function $f$ in our X-ICL.  \n\nAs argued in Askell et al. (2021 ), LLMs should be helpful, honest, and harmless to align with human preferences. Similarly, we argue that this should also be the case for LMMs, which becomes an urgent requirement with the exponential performance improvements. Thus, LMMs must be helpful (e.g. , provide explanations, follow user instructions), honest ( e.g. , abstention or the ability to say I don’t know, no hallucinations), truthful and harmless ( e.g. , no hallucinations, especially in critical applications), generalize well and understand semantics ( e.g. , compositionality). Thus, we start by asking the following question: to which extent LMMs are aligned with human expectations?  \n\nTo provide an answer, we propose a different set of experiments, evaluating LMMs on 5 axes. (1) Object hallucinations (OH) (honest, harmless), where LMMs generate text predictions referring to objects not present in the input image ( Rohrbach et al. ,2018 ;Dai et al. ,2023b ). (2) Abstention (honest), or the ability to abstain from answering, to avoid incorrect responses when the input image cannot provide the required information ( Whitehead et al. ,2022 ). (3) Compositionality (helpful, generalization) wherein the meaning of the sentence depends only on its constituents ( Werning et al. ,2012 ;Lake et al. 
,2017 ) allowing to generalize to an infinite number of compositions. Users might ask the model to (4) explain (helpful) its answers as a means to understand the underlying rationale. In addition, a true assistant should engage in conversations with users and (5) precisely follow their complex instructions (helpful) ( Liu et al. ,2023b ). The conclusion of our study is that current LMMs lack proficiency in these aspects, revealing that scaling alone is not enough. Specifically, LMMs generate plausible and coherent answers instead of faithful and truthful ones (Section 2.1 ), provide answers when they do not know (Section 2.2 ), lack compositionality (Section 2.3 ), struggle to provide good explanations (Section 2.4 ) or precisely follow user instructions (Section 2.5 ).  \n\nWe then investigate how to tackle these limitations. The current go-to solution to align these models is with training ( e.g. instruction tuning, RLHF). Here, we rather focus on efficient approaches. For LLMs, a cheap, and effective alternative to finetuning is In-Context Learning (ICL), which is used to adapt the model to a particular task, a recently have been used to align LLMs ( Lin et al. ,2023 ).  \n\nWhile ICL has been extensively investigated for LLMs ( Lu et al. ,2022 ;Liu et al. ,2022 ;Wei et al. ,2022 ), its application to LMMs has received less attention and mainly focuses on adaptation to new image-text tasks ( Tsimpoukelli et al. ,2021 ;Alayrac et al. ,2022 ). In this work, we explore to which extent we can efficiently tackle LMMs flaws using different variants of multimodal ICL. Our main contributions are:  \n\n•We evaluate 10 recent LMMs (from 3B to 80B) and show important flaws on 5 axes; object hallucinations, answer abstention, compositionality, explainability and instruction following. •We explore Multimodal ICL as a remedy, and study its effect on these abilities. 
We show that while ICL can help on some aspects (explainability, abstention), it has marginal effect (instruction following), no effect (compositionality) or even worsen hallucinations. •Based on our ICL study, we propose simple and novel ICL variants such as; MultitaskIn-Context-Multitask-Learning (MT-ICL), Chain-of-Hindsight-ICL (CoH-ICL), and SelfCorrecting-ICL (SC-ICL). We show the effectiveness of these variants on several abilities.  \n\nTable 1: Evaluated LMMs . We evaluate 10 models that differ in size, training data, and LLM initialization. Tr: training/trainable. (I): instruction. P/D: image-text pairs/web documents. $^*$ use additional ChatGPT data.   \n\n\n<html><body><table><tr><td>Model</td><td>#Tr.params.</td><td>#Tr. samples (P/D)</td><td>Language model</td><td>VisionModel</td><td>(1)Tuning</td></tr><tr><td>OFv2-3B</td><td>1.05B</td><td>60M/120M</td><td>MPT-1B(Team,2023)</td><td>CLIPViT-L/14</td><td></td></tr><tr><td>OFv2-3B(I)</td><td>1.05B</td><td>60M/120M</td><td>MPT-1B(Instruct)(Team,2023)</td><td>CLIPViT-L/14</td><td>√</td></tr><tr><td>OFv2-4B</td><td>1.09B</td><td>60M/120M</td><td>RedPajama-3B(together.ai,2023)</td><td>CLIPViT-L/14</td><td></td></tr><tr><td>OFv2-4B (I)</td><td>1.09B</td><td>60M*/120M</td><td>RedPajama-3B(Instruct)(together.ai,2023)</td><td>CLIPViT-L/14</td><td>√</td></tr><tr><td>OFv2-9B</td><td>1.38B</td><td>60M*/120M</td><td>MPT-7B (Team,2023)</td><td>CLIPViT-L/14</td><td></td></tr><tr><td>OFv1-9B</td><td>1.31B</td><td>5M/10M</td><td>LlaMAv1-7B(Touvron etal.,2023a)</td><td>CLIPViT-L/14</td><td></td></tr><tr><td>IDEFICS-9B</td><td>2B</td><td>141M+/1.82B</td><td>LlaMAv1-7B 
3（Touvronetal.,2023a）</td><td>OpenCLIPViT-H/14</td><td></td></tr><tr><td>IDEFICS-9B(I)</td><td>9B</td><td>141M+/1.82B</td><td>LlaMAv1-7B(Touvronetal.,2023a)</td><td>OpenCLIPViT-H/14</td><td>√</td></tr><tr><td>IDEFICS-80B</td><td>15B</td><td>141M+/1.82B</td><td>LlaMAv1-65B(Touvronetal.,2023a)</td><td>OpenCLIPViT-H/14</td><td></td></tr><tr><td>IDEFICS-80B(I)</td><td>80B</td><td>141M+/1.82B</td><td>LlaMAv1-65B（Touvronetal.,2023a）</td><td>OpenCLIPViT-H/14</td><td></td></tr></table></body></html>',
                         'paper_title': 'Beyond Task Performance: Evaluating and Reducing the Flaws of Large Multimodal Models with In-Context Learning',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 1,
                         'chunk_ext_id': 454845874207495214}],
                 content="""In this chapter, we will delve into the challenges faced by Large Language Models (LLMs) in terms of planning capabilities. As mentioned earlier, the previous chapter introduced the rise of LLMs and their wide application in various tasks. However, despite their impressive performance, LLMs still face significant challenges in planning, which hinder their ability to perform complex reasoning and decision-making tasks. According to the literature, LLMs lack an internal world model to predict world states and simulate long-term outcomes of actions, limiting their ability to perform deliberate planning <sup>14</sup>. Furthermore, studies have shown that LLMs struggle to follow complex user instructions, addressed by fine-tuning models on instruction tuning datasets <sup>10</sup>. 

One of the main challenges in LLMs' planning capabilities is the lack of an internal world model. This limitation prevents LLMs from accurately predicting the consequences of their actions and making informed decisions based on long-term outcomes. As a result, LLMs often struggle to generate executable plans and may produce plans that are incomplete or incorrect <sup>14</sup>. Additionally, LLMs' inability to follow complex user instructions further hampers their planning abilities, as they may not be able to understand and execute tasks that require multiple steps or conditional reasoning <sup>10</sup>.

Another challenge faced by LLMs in planning is their difficulty in making predictions specific to tasks or users due to limitations in fine-tuning and prompt size/context windows <sup>11</sup>. This limitation restricts the flexibility and adaptability of LLMs in planning, as they may not be able to generate plans that are tailored to specific tasks or users. Furthermore, the issue of compositionality in multimodal models has received significant attention, with multiple evaluation benchmarks proposed <sup>10</sup>. LLMs often struggle to understand and generate plans that involve complex compositions of actions and objects, which limits their ability to solve intricate planning problems.

To address these challenges, researchers have explored various approaches. One approach is to fine-tune LLMs on instruction tuning datasets to improve their ability to follow complex user instructions <sup>10</sup>. Another approach is to incorporate object labels as input or employ costly multi-turn reasoning to address object hallucinations in multimodal models <sup>10</sup>. Additionally, training selection functions on top of LLMs has been shown to tackle the challenge of abstaining from answering <sup>10</sup>. These approaches aim to improve LLMs' planning abilities by addressing specific limitations and enhancing their reasoning and decision-making capabilities.

In summary, LLMs face significant challenges in planning, including the lack of an internal world model, difficulty in following complex user instructions, and limitations in making predictions specific to tasks or users. These challenges hinder LLMs' ability to perform complex reasoning and decision-making tasks. However, ongoing research efforts are exploring various approaches to address these limitations and improve LLMs' planning capabilities. In the next chapter, we will discuss methods to enhance LLMs' planning abilities, including knowledge distillation and other techniques.""")
    # Section "2" of the survey being assembled: Chinese title/description
    # ("Foundations of LLMs' planning capabilities") with pre-generated English
    # body text. Unlike the neighboring sections, `reference` is an empty
    # string here rather than a list of retrieved chunks — presumably this
    # section's content was authored/generated without chunk citations
    # (NOTE(review): confirm Section accepts both '' and list for `reference`).
    s4 = Section(section_identifier="2", title="LLMs规划能力的基础",
                 description='介绍了LLMs规划能力的基础知识，包括LLMs的架构和原理、预训练和微调、推理和决策能力，以及规划能力评估方法。',
                 reference='',
                 content="""Large Language Models (LLMs) have emerged as powerful tools in natural language processing, capable of understanding and generating human-like text. However, their planning capabilities, particularly in complex reasoning and decision-making tasks, remain a significant challenge. This chapter delves into the foundational aspects of LLMs' planning abilities, exploring their architecture, pre-training and fine-tuning methods, and the evaluation of their planning capabilities.

### LLMs' Architecture and Principles

LLMs are primarily based on the Transformer model, which utilizes the self-attention mechanism to capture long-range dependencies in sequences<sup>20</sup>. The Transformer architecture consists of embedding and unembedding layers, with a series of transformer layers in between, each containing attention heads and an MLP layer<sup>27</sup>. The self-attention mechanism is built on the notion of queries (Q), keys (K), and values (V), which are linear projections of the token embeddings<sup>20</sup>. This mechanism allows the model to learn long-range interactions across elements in a sequence, enabling it to capture complex dependencies between tokens<sup>28</sup>. Furthermore, the Transformer model incorporates positional embeddings to inject position information into the attention mechanism, addressing the limitation of handling longer inputs<sup>21</sup>. Studies show that the self-attention mechanism in Transformers has quadratic time and memory complexity, limiting their scalability for long sequences<sup>28</sup>. To address this issue, various approaches have been proposed, such as sparse attention, linearized attention, and approximation methods<sup>22</sup>. These approaches aim to reduce the computational cost while maintaining the performance of the model. In summary, the architecture and principles of LLMs, particularly the Transformer model and the self-attention mechanism, play a crucial role in capturing long-range dependencies and enabling complex reasoning. Understanding these concepts is essential for addressing the challenges in LLMs' planning capabilities and enhancing their performance.

### Pre-training and Fine-tuning of LLMs

Pre-training is a fundamental step in developing LLMs, where the models are trained on a vast corpus of diverse and unlabeled data to acquire broad linguistic understanding. This process equips LLMs with the ability to understand and generate human-like text. Two main pre-training methods for LLMs in the natural language domain are Masked Language Modeling (MLM) and Next Token Prediction (NTP). MLM involves masking a portion of the input text and training the model to predict the masked tokens, while NTP focuses on predicting the next token in a sequence. These pre-training methods enable LLMs to learn patterns, relationships, and representations from large-scale unlabeled data, providing a solid foundation for planning abilities.

Fine-tuning, on the other hand, involves training pre-trained LLMs on task-specific recommendation datasets to specialize knowledge and parameters. This step allows LLMs to adapt their capabilities to specific planning tasks. Fine-tuning strategies include full-model fine-tuning and parameter-efficient fine-tuning. Full-model fine-tuning changes the entire model weights, while parameter-efficient fine-tuning changes only a small part of weights or develops trainable adapters. These fine-tuning strategies enable LLMs to learn task-specific knowledge and improve their planning abilities.

The effectiveness of pre-training and fine-tuning methods in enhancing LLMs' planning capabilities has been demonstrated in various studies. For example, the PTUM model proposes Masked Behavior Prediction (MBP) and Next K Behavior Prediction (NBP) to model user behaviors in recommender systems, showcasing the impact of pre-training on planning abilities. Similarly, the M6 model adopts text-infilling and auto-regressive language generation objectives for pre-training, further highlighting the importance of pre-training in planning tasks. Additionally, the P5 model uses multi-mask modeling and mixes datasets of various recommendation tasks for pre-training, enabling zero-shot generation ability, which is crucial for planning in diverse domains.

In conclusion, pre-training and fine-tuning are essential steps in developing LLMs with strong planning capabilities. Pre-training equips LLMs with broad linguistic understanding, while fine-tuning allows them to specialize knowledge and adapt to specific planning tasks. By leveraging these methods, LLMs can overcome the challenges they face in planning abilities and exhibit remarkable performance in various domains. In the next chapter, we will explore the methods of improving LLMs' planning capabilities, including prompt learning, knowledge distillation, and world models.""")
    s5 = Section(section_identifier="2_1", title="LLMs的架构和原理",
                 description='详细介绍了LLMs的架构和原理，例如Transformer模型、自注意力机制等，以及它们在LLMs中的作用。',
                 reference=[
                     {
                         'chunk_content': '# I CE FORMER : A CCELERATED I NFERENCE WITH LONG -SEQUENCE TRANSFORMERS ON CPU S\nYuzhen Mao, Martin Ester, Ke Li   \nSchool of Computing Science, Simon Fraser University   \nBurnaby, BC V5A 1S6, Canada   \n{yuzhenm,ester,keli }@sfu.ca\n\n# A BSTRACT\nOne limitation of existing Transformer-based models is that they cannot handle very long sequences as input since their self-attention operations exhibit quadratic time and space complexity. This problem becomes especially acute when Transformers are deployed on hardware platforms equipped only with CPUs. To address this issue, we propose a novel method for accelerating self-attention at inference time that works with pretrained Transformer models out-of-the-box without requiring retraining. We experiment using our method to accelerate various long-sequence Transformers, including a leading LLaMA 2-based LLM, on various benchmarks and demonstrate a speedup of $2.73\\times-7.63\\times$ while retaining $98.6\\%-99.6\\%$ of the accuracy of the original pretrained models. The code is available on our project website at https://yuzhenmao.github.io/IceFormer/ .\n\n# 1 I NTRODUCTION\nTransformers (Vaswani et al., 2017) have powered incredible advances in NLP, as exemplified by large language models (LLMs) such as GPT-4 and LLaMA 2. Increasingly LLMs are applied to exceptionally long input sequences, which enables many exciting applications such as long-form content creation, extended conversations, and large document search and analysis (OpenAI, 2023; Anthropic, 2023). While LLMs can be feasibly trained with expensive hardware accelerators (e.g. GPUs), they need to be deployed on commodity devices, which may only be equipped with CPUs.  \n\nHowever, it is currently challenging to deploy LLMs on CPUs due to their high computation cost (Dice & Kogan, 2021). 
A significant computational bottleneck arises from the self-attention mechanism that is integral to Transformers – both time and space complexity are quadratic in the sequence length. This problem is exacerbated in the context of LLMs, which are often used on very long sequences.  \n\nTo handle long input sequences, there has been substantial research into reducing the quadratic time complexity of self-attention – these methods are collectively known as efficient Transformers .However, many do not meet the needs of LLMs and are therefore difficult to apply to LLMs.  \n\nAn ideal acceleration method for LLMs should satisfy four criteria: (1) No retraining – the method should not require the model to be retrained, given the enormous computational expense of training LLMs; (2) Generality – the method should be applicable to a variety of LLMs, rather than just those trained with particular constraints built-in; (3) High accuracy – the method should not introduce large approximation errors, since LLMs have many attention layers and so errors from earlier layers can compound; (4) Fast inference – the method should achieve fast test-time performance.  \n\nSatisfying all these criteria simultaneously is difficult, and to our knowledge no existing methods can do so. For example, Transformers with fixed attention patterns, e.g., Longformer (Beltagy et al., 2020), require retraining the model before they can be used. Reformer (Nikita et al., 2020) requires keys to be normalized – this requirement is not met in most pretrained models. Nyströmformer (Xiong et al., 2021) and LARA (Zheng et al., 2022) do not support causal masks, which are commonly found in LLMs. Low-rank methods such as Performer (Choromanski et al., 2020) introduce substantial approximation errors, especially when they are not retrained/finetuned.  \n\n  \nFigure 1: Comparison between Transformer (Vaswani et al., 2017) (top row) and the proposed method, IceFormer (bottom row). 
We illustrate with one query and $k=2$ in $k$ -NNS. In the two attention matrices presented, the top-2 largest attention weights in each row are represented by a dark color. The remaining attention weights are shown in a pale color in the vanilla attention matrix, and are set to zero (depicted in white) in the sparse attention matrix.  \n\nIn this paper, we propose an acceleration method, which we dub IceFormer due to its ability to be applied directly in frozen models without retraining, that simultaneously satisfies the above four criteria. Specifically, IceFormer (1) does not require retraining, (2) can be applied to most LLMs, (3) can approximate vanilla attention accurately, and (4) achieves significantly faster inference speeds compared to existing methods. We illustrate our method in comparison to the Transformer in Figure 1. As shown, the Transformer computes the attention weights $a_{i j}$ for every possible combination of query $q_{i}$ and key $k_{j}$ (Phase 1) and exhaustively enumerates all value vectors $v_{j}$ for each query (Phase 2). In contrast, our method takes advantage of sparsity of the attention matrix and only computes the highest attention weights and enumerates only the value vectors associated with them.  \n\nWe conduct experiments on CPUs on the LRA (Tay et al., 2020), ZeroSCROLLS (Shaham et al., 2023), and LongEval (Li et al., 2023) benchmarks. Across all three benchmarks, IceFormer demonstrates substantially faster inference speeds than existing methods while attaining almost no accuracy loss compared to the Transformer. On the LRA benchm n average IceFormer achieves a $7.63\\times$ speedup relative to the Transformer while retaining $98.6\\%$ of its accuracy. Compared to the best efficient Transformer with comparable accuracy for each t eFormer is on average $3.04\\times$ faster. On the ZeroSCROLLS benchmark, IceFormer leading LLaMA 2-based LLM while retaining $99.6\\%$ of its accuracy. 
es a $2.73\\times$ ×speedup on average compared to a',
                         'paper_title': 'IceFormer: Accelerated Inference with Long-Sequence Transformers on CPUs',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 0,
                         'chunk_ext_id': 454895461673218688},
                     {
                         'chunk_content': '# 2.1 Transformers\nSelf-attention. Transformers have revolutionized natural language processing (NLP) through the mechanism of self-attention [ 17 ], which helps the model to learn long-range interactions across difelements (repr ented by to ens) in a sequence. Consider a sequence $\\mathbf{\\check{\\boldsymbol{X}}}=[\\mathbf{x}^{1}\\;\\mathbf{x}^{2}\\;\\ldots\\;\\mathbf{x}^{N}]^{T}\\in$ $\\mathbb{R}^{N\\times d}$ consisting of $N$ tokens of d-dimensions. The self-attention mechanism is built on the notion of queries $Q$ , keys $K$ , and values $V$ , which are linear projections of the token embeddings. Let $Q\\,=\\,X W_{Q},K\\,=\\,X W_{K},V\\,=\\,X W_{V}$ , where $Q~\\in~\\dot{\\mathbb{R}}^{N\\times d_{q}}$ ,$K\\,\\,\\in\\,\\,\\mathbb{R}^{N\\times d_{k}}$ ,$V\\,\\,\\in\\,\\,\\mathbb{R}^{N\\times\\widecheck{d_{v}}}$ . The attention operation can be written as:  \n\n$$\n\\mathrm{Attention}(Q,K,V)=\\mathrm{softmax}\\left(\\frac{1}{\\sqrt{d_{k}}}Q K^{T}\\right)V,\n$$  \n\nwhere softmax $(\\cdot)$ denotes a row-wise softmax normalization function.  \n\nMulti-head self-attention (MSA). Rather than only learning one set of keys, queries, and values for our tokens, MSA allows each head to find different patterns in the data that are useful for inference. Each of the $h$ heads provides a $d_{v}$ dimensional output, and the outputs from all heads are concatenated into a $d_{v}\\cdot h$ vector, which is further projected to produce the final representation.  \n\n  \nFigure 1: Embedded Interaction Transformer ( EIT ). (A) A traditional state-space view would treat the collective dynamics as a population right from the beginning, and use a population encoder to learn how the dynamics progress along time. (B) EIT learns dynamic embeddings of each channel with an individual encoder at the beginning. After embedding each channel’s dynamics, we feed them into an interaction encoder to build a population representation. 
The two encoders work together to build a representation space that is richer than that of the traditional method. (C) The detailed architecture: EIT consists of an individual transformer that processes data for each individual, an interaction transformer that processes embeddings at each timepoint, and two projection modules at the end of both transformers.  \n\nThe full operation of a transformer of $L$ layers can be written as below:  \n\n$$\n\\begin{array}{r l}&{Z_{0}=\\big[\\mathrm{Embed}(\\mathbf{x}^{1}),\\mathrm{Embed}(\\mathbf{x}^{2}),\\cdot\\cdot\\cdot\\mathrm{Embed}(\\mathbf{x}^{N})\\big]+\\mathbf{E}_{\\mathrm{pos}}}\\\\ &{Z_{\\ell+1}=Z_{\\ell}+\\mathrm{MSA}(Z_{\\ell})+\\mathrm{FF}(Z_{\\ell}+\\mathrm{MSA}(Z_{\\ell})),\\;\\ell=\\{0,\\ldots,L-1\\}}\\end{array}\n$$  \n\nwhere $Z_{0}$ is the summation of the individual embedding of each data token and the positional embedding $\\mathbf{E}_{\\mathrm{pos}}$ that helps to retain the positional information, and each transformer layer is the combination of the MSA operation $({\\mathrm{MSA}}(\\cdot))$ , the projection $(\\mathrm{FF}(\\cdot))$ , and residual connections.  \n\nTransformers for multi-channel time-series. Multi-channel time-series are a natural candidate for modeling with the transformer architecture. One common approach of modeling many time-varying data streams with transformers is to first aggregate features across all channels into a combined representation at the beginning of the model. The population dynamics is thus learnt with the resulting embedding via a temporal transformer for the purpose of inference. 
There are many complex ways to create this embedding: [ 18 ] and [ 19 ] extract embeddings from multivariate physical systems with Koopman operators before feeding the resulting representation into a temporal transformer; [ 20 ]use a graph neural network to embed interconnected-structures to perform skeleton-based action recognition; [21] use a convolutional architecture to extract image embeddings before feeding them into a transformer.  \n\nAnother approach is to re-design the attention block such that the attention operation is computed both along the temporal and spatial dimension. Many variants of this ‘spatial-temporal’ attention block have been shown effective: [ 22 ] propose a non-autoregressive module to generate queries for time series forecasting; [ 23 ] use stacked spatial-temporal modules for traffic flow forecasting; GroupFormer [24 ] embed spatial and temporal context in parallel to create a clustered attention block for group activity recognition. While our approach also makes use of the high-level idea of separating spatial (individual) and temporal information, crucially, we restrained the direct computation between the spatially-related attention map and the temporally-related attention map, which yields a representation of the individual which is completely free of spatial interactions.  \n\nSpacetime attention in video transformers. With the advances of the Vision Transformer [ 25 ]as a new way to extract image embeddings, many ‘spatial-temporal transformer’ architectures have been developed in the video domain [ 26 –28 ]. These works explore and propose interesting solutions for how to organize spatial attention and temporal attention with either coupled (series) [ 28 ] and factorized (parallel) attention blocks [ 26 ], as well as how to create better tokens for videos by creating three-dimensional spatio-temporal ‘tubes’ as the tubelet tokenizations [ 26 ]. 
However, these methods leverage inductive biases that are specific to images and videos, and do not handle the potentially separable channels that we are interested in.  \n\nObject-centric representation learning. There are many representation learning methods in video [29 –32 ], physics-guided complex systems [ 33 ], and behavior modeling [ 34 ,35 ] that also aim to learn interactions between discrete objects observed in the data. In these approaches, interactions between different objects are typically learned in a joint manner by combining the inferred representation of the objects into a common representation. In contrast, our framework aims to decompose dynamics into two parts, forming a representation of the dynamics of each individual source or object in addition to a representation of the interactions across many sources. Thus, one could imagine using our approach to build enhanced representations of object dynamics in other vision and behavioral neuroscience applications [ 30 ,34 ]. It would be interesting to see if an enhanced or individual representation could be used to further improve the performance of upstream object localization and inference approaches.',
                         'paper_title': 'Seeing the Forest and the Tree: Building Representations of Both Individual and Collective Dynamics with Transformers.',
                         'source_name': 'NeurIPS', 'year': '2022', 'chunk_id': 1,
                         'chunk_ext_id': 454967408951559304},
                     {
                         'chunk_content': '# 2.2 Positional Encoding of Transformers\nThe attention mechanism of Transformers is defined as:  \n\n$$\n\\mathrm{Attn}\\left(\\mathbf{X},\\mathbf{C}\\right)=\\sigma\\left({\\frac{Q(\\mathbf{X})K(\\mathbf{C})^{\\intercal}}{\\sqrt{d}}}\\right)V(\\mathbf{C})\n$$  \n\nwhere $\\textbf{X}\\in\\mathbb{R}^{l\\times d}$ is the query sequence with length l,$\\mathbf{C}\\in\\mathbb{R}^{m\\times d}$ ∈is the context sequence with $m$ $\\sigma(\\cdot)$ $Q,\\,K$ ,$V:\\mathbb R^{d}\\to\\mathbb R^{d}$ projecting inputs into the space of query, key and →is a linear transformation function value respectively. Since the attention function is ignorant of the position information of sequence, the Transformer model uses a method that added a special embedding to token embeddings on input of the first layer, called Positional Embedding, to inject position information. Vaswani et al. (2017 )proposed Sinusoidal Positional Embedding, which is a non-trainable constant embedding computed from trigonometric functions.  \n\nOn the other hand, BERT ( Devlin et al. ,2019 )uses trainable positional embeddings instead of constant embeddings. It is adaptable to training data, but has limitations such as being unable to handle longer inputs than those used for training and not being translation-invariant.  \n\nRelative position methods have been studied, for solving these problems ( Shaw et al. ,2018 ;Raffel et al. ,2020 ). It learns parameters representing the relative distance between tokens and utilizes them to calculate the attention score. However, It is slower than the sinusoidal approach and uses extra memory and parameters ( Press et al. ,2021 ).  \n\nPress et al. 
(2021 ) pointed out that previous methods are vulnerable to extrapolation and proposes ALiBi, a modified attention function for selfattention as follows:  \n\n$$\n\\mathrm{ALiBi}\\left(\\mathbf{X}\\right)=\\sigma\\left(\\frac{Q(\\mathbf{X})K(\\mathbf{X})^{\\intercal}}{\\sqrt{d}}-\\mathbf{D}^{\\intercal}\\right)V(\\mathbf{X})\n$$  \n\n$$\n\\mathbf{D}_{i,j}=\\left\\{\\begin{array}{l l}{m\\times(i-j),}&{\\mathrm{for}\\;i\\geq j}\\\\ {\\infty,}&{\\mathrm{for}\\;i<j}\\end{array}\\right.\n$$  \n\nwhere $m$ is a head-specific positive real-numberhyperparameter and $\\mathbf{D}\\in\\mathbb{R}^{l\\times l}$ is a distance matrix.\n\n# 2.3 Pretraining objectives for Question Answering\nTo pretrain a language model, an appropriate training objective that fully exploits the language understanding should be defined. Masked LM ( Devlin et al. ,2019 ), for example, replaces $15\\%$ of the input tokens with a mask token or a random token and forces the model to denoise it. After pre-training is complete, the last hidden representations of the model contains information to restore the replaced token to the original one and it is useful to transfer this information for other NLP tasks as well, such as question answering.  \n\nHowever, Masked LM (MLM) is suboptimal for extractive QA task. Joshi et al. (2020 ) proposed SpanBERT, which is pretrained by a span-level masking scheme whose lengths follows geometric distribution and it outperformed BERT with MLM in the most of tasks, especially extractive QA. They proved that training objective predicting spans rather than tokens generates better representations especially for span selection tasks.  \n\nRam et al. (2021 ) introduced Recurring Span Selection (RSS), a novel pre-training objective which is better aligned to QA tasks. 
In RSS, each recurring text span, except for one to be used as the golden answer span, is masked with a special token, [QUESTION], and a model is trained to point to the position of the golden answer span using the representations from each [QUESTION] token. Because this pre-training task is so similar to the real QA task, the model trained in this objective outperforms models with other pre-training objectives in both the few-shot and high-resource settings for QA.\n\n# 2.4 Datasets of Question Answering for Longer Documents\nThe most widely used English QA dataset is SQuAD ( Rajpurkar et al. ,2016 ), but it’s insufficient to test understanding of long contexts because of its short paragraph. Thus, for QA of longer documents, other datasets are considered. Typical examples are Natural Questions ( Kwiatkowski et al. ,2019 )and TriviaQA ( Joshi et al. ,2017 ), which provide a whole Wikipedia page as the document. NarrativeQA ( Koˇciský et al. ,2018 ), whose documents consist of movie scripts and books, is another example. Recently, Pang et al. (2022 ) introduced QuALITY, a multiple-choice QA dataset comprised of around 5000 tokens of documents gathered from various sources such as Project Gutenberg and Open American National Corpus.  \n\nFor Korean QA datasets, the most standard is KorQuAD 1.0 and KorQuAD 2.0, which is comparable to SQuAD in English. The construction and characteristics of the dataset in KorQuAD 1.0 (Lim et al. ,2019 ) are nearly identical to those of SQuAD, except that it is in Korean. Therefore, like SQuAD, KorQuAD 1.0 is not suitable for evaluating QA for long documents. To evaluate understanding of longer documents, KorQuAD 2.0 ( Kim et al. ,2019 ) is often used. 
Since it provides the whole Wikipedia page as a single context without trimming and the page includes not only text but also HTML components such as tables and lists, structural understanding of long HTML documents is required to conquer it.\n\n# 3 LittleBird Architecture\nIn this section, we describe the architecture of LittleBird model. Basically, the model can be viewed as a composition of several key ideas including sliding window attention from BigBird ( Zaheer et al. ,2020 ), linear bias to attention from ALiBi ( Press et al. ,2021 ) and pack and unpack attention from LUNA ( Ma et al. ,2021 ).',
                         'paper_title': 'LittleBird: Efficient Faster Longer Transformer for Question Answering',
                         'source_name': 'EMNLP', 'year': '2022', 'chunk_id': 1,
                         'chunk_ext_id': 454919303306433332},
                     {
                         'chunk_content': '# 2 RELATED WORK\nEfficient Transformer Variants Among the models to approximate full self-attention, Longformer (Beltagy et al., 2020) $(O(N))$ sparsifies the full self-attention into three attention patterns of sliding window, dilated sliding window, and global attention. BigBird (Zaheer et al., 2020) $(O(N))$ combines global attention, local attention, and random attention. Poolingformer (Zhang et al., 2021a) $(O\\bar{(}N))$ uses a two-level attention schema, with the first level using a smaller sliding window to aggregate local information and the second level using a larger window with pooling attention to reduce time and memory cost. Focal Transformer (Yang et al., 2021) $(O(N))$ uses both fine-grained local interactions and coarse-grained global interactions to balance efficiency and effectiveness of capturing short- and long-range dependencies. Transformer-LS (Zhu et al., 2021) $(O(N))$ approximates the full attention by aggregating long-range attention via dynamic projections and short-term attention via segment-wise sliding window. H-Transformer-1D (Zhu & Soricut, 2021) $(O(N))$ exploits a matrix structure similar to Hierarchical Matrix. AdaMRA (Zhang et al., 2021b) $(O(N))$ leverages a multi-resolution multi-head attention mechanism and kernel attention. Luna (Ma et al., 2021) $(O(N))$ introduces an additional fixed length sequence served as query to attend to the original input while the output is served as key and value to attend to the original input. Apart from the sparse attention models, other approximation approaches explore locality sensitive hashing and matrix approximation methods. Reformer (Kitaev et al., 2020) $O(N l o g N)$ replaces self-attention with locality sensitive hashing. Performer (Choromanski et al., 2020; 2021) $(O(N))$ approximates softmax attention by leveraging random features. Linformer (Wang et al., 2020a) approximates the self-attention matrix with a low-rank factorization. 
Nystr¨omformer (Xiong et al., 2021) $(O(N))$ approximates the softmax attention with the Nystr¨om method by sampling a subset of columns and rows. However, these approximation methods have strengths on certain tasks and may cause accuracy degradation on many other tasks.  \n\nOur work is in another line of research on replacing self-attention with more efficient token mixing mechanisms. MLP-Mixer (Tolstikhin et al., 2021) $(O(N^{2}))$ applies two separate linear transformations on the hidden state dimension and the sequence dimension. FNet (Lee-Thorp et al., 2021) $(O(N l o g N))$ replaces the self-attention sublayer with 2D-FFT mixing sublayer. AFTlocal/conv (Zhai et al., 2021) $(O(s N),s~<~N)$ first combines the key and value with a set of learned position biases and then combines the query with this result via element-wise multiplication. Fastformer ( $\\mathrm{W}\\mathbf{u}$ et al., 2021) $(O(N))$ first models global context via additive attention then models interactions between global context and input representations through element-wise product. Shared Workspace (Goyal et al., 2021) $(O(N))$ proposes the idea of using a shared bottleneck to tackle the problem of quadratic dependence in attention.  \n\nPre-training Tasks It has been observed that both underlying model architecture and pre-training are crucial to performance of PLMs. BERT (Devlin et al., 2019) with a Transformer encoder is pretrained with masked language modeling (MLM) and next sentence prediction (NSP) tasks on largescale unlabeled text corpora including the English Wikipedia and BooksCorpus. MLM predicts the masked token from context. NSP predicts whether a sentence pair is contiguous or not in the original source. Many approaches are proposed to improve these two tasks and show that more challenging pre-training tasks may help PLMs learn better and more transferable language representations.  
\n\nWhole word masking (WWM) (Devlin et al., 2019; Cui et al., 2019) and SpanBERT (Joshi et al., 2020) outperform BERT on many tasks. WWM simultaneously masks all WordPiece tokens belonging to the same word and forces the model to recover a complete whole word. SpanBERT randomly samples contiguous spans inside of individual tokens and augments MLM with a new task to predict the entire masked span. RoBERTa (Liu et al., 2019) reports ineffectiveness of NSP and removes it from pre-training. ALBERT (Lan et al., 2020) replaces NSP with a sentence-order prediction (SOP) task to predict whether two consecutive sentences are in the right order or not, for learning fine-grained inter-sentence coherence. StructBERT (Wang et al., 2020b) extends SOP to a new sentence structural objective (SSO) as a ternary classification on two sentences $(S_{1},S_{2})$ to decide whether $S_{1}$ precedes or follows $S_{2}$ or the two sentences are noncontiguous. More challenging tasks for learning inter-sentence relations and document/discourse structures (Iter et al., 2020; Lee et al., 2020; Ding et al., 2021) show promising performance improvements on PLMs.\n\n# 3 MODEL\nOur work is inspired by the External Attention (EA) approach proposed in (Guo et al., 2021) for visual tasks. A $\\pmb{x}\\;=\\;\\{\\bar{x_{1}},...,x_{N}\\}$ are mapped to an embed ng matrix denoted by $\\pmb{H}(\\dot{\\in}\\mathrm{~\\mathbb{R}}^{N\\times d})\\ =\\ \\{h_{1},...,h_{N}\\}$ ∈{}, where Nis the sequence length and dis the hidden dimension. EA uses two linear layers to implement external and shared memories, which facilitates learning correlations across all samples and hence serves strong regularization to and improves generalization of the attention mechanism with linear complexity. 
We simplify EA into multi-layer perceptron and softmax , and observe that by infusing the sequence-level information into each token through the denominator term $\\sum_{n=1}^{N}e^{\\dot{h}_{n}}$ ,softmax provides context modeling capabilities. However, softmax involves calculations of exponents, which is still slow. Consequently, we consider using pooling as an alternative to capture contextual information with significantly reduced complexity. We propose a Pooling Network $(\\bf P_{0}N e t)$ as a drop-in replacement for the self-attention sublayer in Transformer, as shown in Figure 1. PoNet models different levels of contextual information through a multi-granularity pooling (MP) block consisting of three components, namely, global aggregation (GA), segment max-pooling (SMP), and local maxpooling (LMP). These pooling features are then aggregated through pooling fusion.  \n\n  \nFigure 1: The illustration of the PoNet model architecture. The right enlarged view shows multigranularity pooling (GA, SMP, LMP) and pooling fusion (Section 3).',
                         'paper_title': 'PoNet: Pooling Network for Efficient Token Mixing in Long Sequences',
                         'source_name': 'ICLR', 'year': '2022', 'chunk_id': 1,
                         'chunk_ext_id': 454939008330377604},
                     {
                         'chunk_content': '# UNDERSTANDING I N-C ONTEXT LEARNING IN TRANSFORMERS AND LLM S BY LEARNING TO LEARN DISCRETE FUNCTIONS\nSatwik Bhattamishra ∧† Arkil Patel ∨Phil Blunsom ∧⊕ Varun Kanade ∧† ∧University of Oxford ∨Mila and McGill University ⊕Cohere\n\n# A BSTRACT\nIn order to understand the in-context learning phenomenon, recent works have adopted a stylized experimental framework and demonstrated that Transformers can learn gradient-based learning algorithms for various classes of real-valued functions. However, the limitations of Transformers in implementing learning algorithms, and their ability to learn other forms of algorithms are not well understood. Additionally, the degree to which these capabilities are confined to attention-based models is unclear. Furthermore, it remains to be seen whether the insights derived from these stylized settings can be extrapolated to pretrained Large Language Models (LLMs). In this work, we take a step towards answering these questions by demonstrating the following: (a) On a test-bed with a variety of Boolean function classes, we find that Transformers can nearly match the optimal learning algorithm for ‘simpler’ tasks, while their performance deteriorates on more ‘complex’ tasks. Additionally, we find that certain attention-free models perform (almost) identically to Transformers on a range of tasks. (b) When provided a teaching sequence , i.e. a set of examples that uniquely identifies a function in a class, we show that Transformers learn more sample-efficiently. Interestingly, our results show that Transformers can learn to implement two distinct algorithms to solve a single task, and can adaptively select the more sample-efficient algorithm depending on the sequence of in-context examples. (c) Lastly, we show that extant LLMs, e.g. LLaMA-2, GPT-4, can compete with nearest-neighbor baselines on prediction tasks that are guaranteed to not be in their training set.',
                         'paper_title': 'Understanding In-Context Learning in Transformers and LLMs by Learning to Learn Discrete Functions',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 0,
                         'chunk_ext_id': 454845897061735214},
                     {
                         'chunk_content': '# 2.2 Transformer Architectures\nThe Transformer architecture was initially developed for Natural Language Processing tasks such as Machine Translation [1]. Variations of this architecture have since found applications in many domains of Deep Learning (e.g., Vision Transformer for Computer Vision tasks [2]). The Transformer architecture is based on the Scaled Dot-Product Attention operation (also known as Self-Attention when the same tokens are used for query, key and value) which aims to capture the interactions between the entities in a sequence. In order to allow the Scaled Dot-Product Attention to model more than one type of relationship between different entities, it is extended to Multi-Head Attention.  \n\nOne critical issue with the Self-Attention mechanism is the quadratic growth in memory and computational complexity with the number of entities $n$ in a sequence. Accordingly, a great deal of research has been carried out to improve the efficiency of Transformers [21]. Block-wise or Chunking methods such as Image Transformer [22] and Vision Transformer [2] group up entities of a local receptive field into down to O$O(n_{b}^{2})$ gle bloc , where n$n_{b}~<~n$ is the number of blocks. ng the ${\\mathcal{O}}(n^{2})$ complexity Techniques such as sliding windows, dilation and pooling can be used to achieve a similar effect [23]. As opposed to a fixed grouping scheme, approaches such as the Reformer [7] learn the best groupings in a data-driven manner. By using Locality-Sensitive Hashing (LSH) as similarity measure for clustering the entities, Reformer reduces the complexity to $\\mathcal{O}(n\\log n)$ .  \n\nA different paradigm aims to derive approximations of the self-attention matrix. Methods such as Linformer [24], Nystr¨omformer [9] and Performer [8] reduce the quadratic complexity from group of attention-free Multi-Layer Perceptron (MLP) based $O(n^{2})$ to a linear $\\mathcal{O}(n)$ . 
More recently, a approaches such as MLP-Mixer [25] and ResMLP [26] have been proposed, that strive to obtain performance similar to that of Transformers, while reducing the computational cost by removing the Self-Attention mechanism all together and employing MLPs in conjunction with transposition in order to preserve a global receptive field [27].\n\n# 3 CONTINUAL TRANSFORMERS\nThe Scaled Dot-Product Attention lies at the heart of the Transformer Encoder block. Consider the special case, where the query, key and value inputs to such a module constitute a continual stream of $d_{\\cdot}$ -dimensional tokens and we wish to perform the attention operation for each step in time over a finite window of length $n$ . The Scaled DotProduct Attention models a many-to-many relation. Accordingly, each new input token will have a retroactive impact on outputs corresponding to prior tokens. Let us consider three implementation options for the operation and derive the complexity of each.\n\n# 3.1 Regular Scaled Dot-Product Attention\nThe regular Scaled Dot-Product Attention can be written as  \n\n$$\n\\begin{array}{r l}&{\\mathrm{Att}(\\mathbf{Q},\\mathbf{K},\\mathbf{V})=\\mathbf{D}^{-1}\\mathbf{A}\\mathbf{V}}\\\\ &{\\qquad\\qquad\\quad\\mathbf{A}=\\exp\\left(\\mathbf{Q}\\mathbf{K}^{\\top}/\\sqrt{d}\\right)}\\\\ &{\\qquad\\qquad\\quad\\mathbf{D}=\\mathrm{diag}\\left(\\mathbf{A}\\mathbb{1}_{n}^{\\top}\\right),}\\end{array}\n$$  \n\nwhere $\\mathbf{Q},\\mathbf{K},\\mathbf{V}\\ \\in\\ \\mathbb{R}^{n\\times d}$ are uery, key, and value row matrices, ${\\bf A},{\\bf D}\\in\\mathbb{R}^{n\\times n},$ ∈, and 1 $\\mathbb{I}_{n}$ denotes a row-vector of nones. When a new token triple arrives in the next time-step, we can update $\\mathbf{Q},\\mathbf{K},$ , and $\\mathbf{V}$ by discarding their oldest token and prepending the new one in a first-in-first-out (FIFO) manner. Then the computational steps in Eqs. (1) to (3) are repeated once again.  
\n\nWhen employing this formulation, each time-step results in $2n^{2}d+2\\dot{n}d$ multiplications, $2n^{2}d\\mathrm{~-~}n d\\mathrm{~-~}n$ additions, and n$n^{2}$ exponentiations as accounted for in Table 1. This amounts to a time complexity of $O(n^{2}d)$ and a ${\\mathcal{O}}(n^{2})$  \n\nmemory complexity originating from the transient featuremap A . Furthermore, tant-sized cache fe$3(n\\!-\\!1)d$ is needed to store the $n-1$ −latest tokens in Q,Kand V.  \n\nClearly, there is a considerable amount of redundancy in the computation of $\\mathbf{Q}\\mathbf{K}^{\\top}$ that we could cache and reuse in subsequent steps. That said, caching $\\mathbf{Q}\\mathbf{K}^{\\top}$ directly comes with a high memory penalty of $(n-1)^{2}$ . Fortunately, we can devise another computational scheme, which is laid out in Section 3.2.  \n\nTABLE 1: Floating Point Operations for the Scaled DotProduct Attention in Eqs. (1) to (3). $\\mathbf{D}^{-1}(\\cdot)$ can be efficiently computed as element-wise multiplication with AV .  \n\n<html><body><table><tr><td></td><td>Mul.</td><td>Add</td><td>Exp</td></tr><tr><td>Eq. (1)</td><td>n²d+nd</td><td>nd(n - 1)</td><td>0</td></tr><tr><td>Eq. (2)</td><td>n²d+nd</td><td>n²(d - 1)</td><td>n2</td></tr><tr><td>Eq. (3)</td><td>0</td><td>n(n -1)</td><td>0</td></tr></table></body></html>\n\n# 3.2 Continual Retroactive Dot-Product Attention\nWe can compute the Scaled Dot-Product Attention, $\\mathbf{D}^{-1}\\mathbf{A}\\mathbf{V},$ ,in a step-wise manner using only the latest query, key, and value steps, qne $\\mathbf{\\Lambda}_{\\mathrm{w}},\\mathbf{k}_{\\mathrm{new}},\\mathbf{v}_{\\mathrm{new}}\\ \\in\\ \\dot{\\mathbb{R}}^{1\\times d},$ , as inputs alongside appropriately cached partial results.  
\n\nThe softmax normalisation with ${\\bf D}^{-1}$ can be efficiently implemented via column-aligned element-wise multiplications (denoted $\\odot$ ereafter) of a column-vector $\\mathbf{d}=\\mathbf{\\dot{A}}\\mathbb{I}_{n}^{\\top}$ .$\\mathbf{d}_{\\mathrm{mem}}=\\mathbf{A}_{\\mathrm{prev}}^{(-n+1:-1)}\\mathbb{1}_{n-1}^{\\top},$ the update as If we cache the $n-1$ −values of the previous step tokens, i.e. −, alongside $\\mathbf{Q}$ and $\\mathbf{K}$ , we can define  \n\n$$\n\\begin{array}{r}{\\mathbf{d}^{(-n+1:-1)}=\\mathbf{d}_{\\mathrm{mem}}^{(-n+2:0)}-\\exp\\left(\\mathbf{Q}_{\\mathrm{mem}}\\mathbf{k}_{\\mathrm{old}}^{\\top}\\right)}\\\\ {+\\exp\\left(\\mathbf{Q}_{\\mathrm{mem}}\\mathbf{k}_{\\mathrm{new}}^{\\top}\\right)}\\end{array}\n$$  \n\n$$\n\\mathbf{d}^{(0)}=\\exp\\left(\\frac{\\mathbf{q}_{\\mathrm{new}}}{\\sqrt{d}}\\left(\\mathbf{K}_{\\mathrm{mem}}\\parallel\\mathbf{k}_{\\mathrm{new}}\\right)^{\\top}\\right)\\mathbb{1}_{n}^{\\top},\n$$  \n\nwhere $\\mathbf{Q}_{\\mathrm{mem}}$ are the $n-1$ rior query step tokens, $\\mathbf{k}_{\\mathrm{new}}$ is the new key token, and $\\mathbf{k}_{\\mathrm{old}}$ is the key token from nsteps ago. Here we use index 0 to denote the current time step and negative indices to denote prior time steps. 
An update step for AV can e be defined as a function of the $n-1$ prior values $\\mathbf{AV}_{\\mathrm{mem}}$ :  \n\n$$\n\\begin{array}{r l r}{\\mathrm{\\bfA}\\mathrm{\\bfV}^{(-n+1:-1)}=\\mathrm{\\bfA}{\\bf V}_{\\mathrm{mem}}^{(-n+2:0)}-\\exp\\left({\\bf Q}_{\\mathrm{mem}}{\\bf k}_{\\mathrm{old}}^{\\top}\\right){\\bf v}_{\\mathrm{old}}}&{}&\\\\ {+\\exp\\left({\\bf Q}_{\\mathrm{mem}}{\\bf k}_{\\mathrm{new}}^{\\top}\\right){\\bf v}_{\\mathrm{new}}}\\end{array}\n$$  \n\n$$\n\\mathbf{A}\\mathbf{V}^{(0)}=\\exp\\left({\\frac{\\mathbf{q}_{\\mathrm{new}}}{\\sqrt{d}}}\\left(\\mathbf{K}_{\\mathrm{mem}}\\parallel\\mathbf{k}_{\\mathrm{new}}\\right)^{\\top}\\right)\\left(\\mathbf{V}_{\\mathrm{mem}}\\parallel\\mathbf{v}_{\\mathrm{new}}\\right)\n$$  \n\nFinally, we can compute the Continual Retroactive Attention output in the usual manner:  \n\n$$\n\\begin{array}{r}{C o R e}{\\mathrm{Att}}(\\mathbf{q}_{\\mathrm{new}},\\mathbf{k}_{\\mathrm{new}},\\mathbf{v}_{\\mathrm{new}})=\\mathbf{d}^{-1}\\odot\\mathbf{A}\\mathbf{V}.}\\end{array}\n$$  \n\nAn illustration of the update steps is shown in Fig. 1.  \n\nAs to the computational complexity, a time-step can now puted with $7n d\\,{+}2n\\,{-}\\,3d$ multiplications, $6n d\\,{+}\\,3n\\,-$ $6d-3$ Table and a −O$\\mathcal{O}(n d)$ additions, and memory complexity. s, we have a time complexity of $3n-2$ −exponentials (d O$O(n d)$ found in per step TABLE 2: Floating Point Operations for the Continual Retroactive Dot-Product Attention in Eqs. (4) to (8). The outputs of the exponentials in Eq. (4) and Eq. (5) can be reused in Eq. (6) and Eq. (7) respectively, and are omitted in the count.  \n\n  \nFig. 1: Continual Retroactive Dot-Product Attention . The query $(\\mathbf{Q}).$ , key $({\\bf K})$ , and value $({\\bf V})$ matrices are aggregated over time by caching the step vectors qnew ,$\\mathbf{k}_{\\mathrm{new}},$ and $\\mathbf{v}_{\\mathrm{new}}$ in a FIFO queue. 
During each step, only the entries of A associated with qnew ,${\\bf k}_{\\mathrm{new}}$ and the oldest $\\mathbf{K}$ step, ${\\bf k}_{\\mathrm{old}}$ are computed. The diagonal entries of the row-normalisation matrix $\\mathbf{D}$ as well as the AV can be updated retroactively by subtracting features corresponding to ${\\bf k}_{\\mathrm{old}}$ and adding features related to ${\\bf k}_{\\mathrm{new}}$ to the cached outputs of the previous step, $\\mathbf{D}_{\\mathrm{mem}}$ and $\\mathbf{AV}_{\\mathrm{mem.}}$ , respectively.  \n\n<html><body><table><tr><td></td><td>Mul.</td><td>Add</td><td>Exp</td></tr><tr><td>Eq. (4)</td><td>2(n - 1)d</td><td>2(n -2)d+ 2(n - 1)</td><td>2(n - 1)</td></tr><tr><td>Eq. (5)</td><td>nd+n+d</td><td>nd+(n-1)+d</td><td>n</td></tr><tr><td>Eq. (6)</td><td>2(n - 1)d</td><td>2(n - 1)d</td><td>0</td></tr><tr><td>Eq. (7)</td><td>nd</td><td>(n -1)d</td><td>0</td></tr><tr><td>Eq. (8)</td><td>nd+n</td><td>0</td><td>0</td></tr></table></body></html>',
                         'paper_title': 'Continual Transformers: Redundancy-Free Attention for Online Inference',
                         'source_name': 'ICLR', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454847971264970550},
                     {
                         'chunk_content': '# 6 CONCLUSION\nWith the widespread success of Transformers on system 1 perception tasks, it is intriguing that they could also perform well on system 2 logical reasoning problems. Adding recurrences to the baseline model already outperforms the existing methods, especially on visual Sudoku puzzles with large margins $(93.5\\%$ over enhanced SATNet’s $64.8\\%$ ), successfully addressing the issue of symbol grounding. We further improve the results by injecting underlying constraints into Transformer training so that the model can learn with fewer data, converge faster, and even improve accuracy. Our experiments show that more recurrences during training tend to yield higher test accuracy and additional recurrences during testing could also help. The number of attention blocks affects the size and modeling power of Recurrent Transformer. More attention heads lead to faster convergence, and the accuracy may decrease if the heads are too few to capture different semantic meanings.\n\n# A CKNOWLEDGEMENTS\nThis work was partially supported by the National Science Foundation under Grant IIS-2006747.   \n\n# A RECURRENT TRANSFORMER DETAILS\nFigure 4 shows a multi-layer Transformer encoder architecture ( a ) and the Recurrent Transformer architecture in our work ( b), where every dotted box denotes a self-attention block. An output layer consists of a layer normalization, a linear layer, and a softmax activation function. In ( b), all output layers share the same parameters, while every self-attention block has its own parameters.  \n\n  \nFigure 4: ( a ) Transformer encoder. ( b) Recurrent Transformer encoder.  
\n\nThe Recurrent Transformer with $L$ self-attention blocks and $R$ recurrences can be formulated as follows:  \n\n$$\n\\begin{array}{r l r}{\\pmb{H}^{(r,0)}=\\pmb{H}^{(r-1,L)}}&{\\forall r\\in\\{1,\\ldots,R\\}}\\\\ {\\pmb{H}^{(r,l)}=\\mathrm{block}_{l}(\\pmb{H}^{(r,l-1)})}&{}&{\\forall r\\in\\{1,\\ldots,R\\},\\ \\forall l\\in\\{1,\\ldots,L\\}}\\\\ {\\pmb{X}^{(r,l)}=\\mathrm{softmax}(\\mathrm{layer\\_norm}(\\pmb{H}^{(r,l)})\\cdot\\pmb{W}_{o u t})}&{}&{\\forall r\\in\\{1,\\ldots,R\\},\\ \\forall l\\in\\{1,\\ldots,L\\}}\\end{array}\n$$  \n\nwhere $\\pmb{H}^{(r,l)}\\in\\mathbb{R}^{t\\times d_{h}}$ denotes den embedd ngs of $t$ input tokens after the $l$ -th (self-attent on) block in the r-th recurrent step, $b l o c k_{l}$ denotes the l-th Transformer block in the model (i.e., the l-th igure 4 ( b)), layer _norm denotes layer normalizati $\\cdot$ x multiplication, $W_{o u t}\\in\\mathbb{R}^{d_{h}\\times c}$ ∈is the weight of the t layer for $c$ classes, and $X^{(r,l)}\\in[0,1]^{t\\times c}$ ∈denotes the NN output with the hidden embedding ${\\pmb{H}}^{(r,l)}$ .  \n\nEach block lis defined on weights $\\pmb{W}_{K}^{(l)},\\pmb{W}_{Q}^{(l)},\\pmb{W}_{V}^{(l)},\\pmb{W}_{P}^{(l)}\\in\\mathbb{R}^{d_{h}\\times d_{h}}$ (for simplicity, we describe a single-head case) and a multilayer perceptron $\\mathrm{MLP}_{l}$ with output size $d_{h}$ .  
\n\n$$\n\\begin{array}{r l}&{\\mathbf{K}^{(r,l)}=\\mathrm{layer}_{-}\\mathrm{norm}(H^{(r,l)})\\cdot W_{K}^{(l)}\\qquad\\qquad\\mathbf{Q}^{(r,l)}=\\mathrm{layer}_{-}\\mathrm{norm}(H^{(r,l)})\\cdot W_{Q}^{(l)}}\\\\ &{\\mathbf{V}^{(r,l)}=\\mathrm{layer}_{-}\\mathrm{norm}(H^{(r,l)})\\cdot W_{V}^{(l)}\\qquad\\qquad\\mathbf{A}^{(r,l)}=\\mathrm{softmax}(\\frac{\\mathbf{Q}^{(r,l)}(\\mathbf{K}^{(r,l)})^{T}}{\\sqrt{d_{h}}})}\\\\ &{\\qquad\\qquad\\qquad\\qquad\\mathbf{V}^{*}=\\left(\\mathbf{A}^{(r,l)}\\cdot\\mathbf{V}^{(r,l)}\\right)\\cdot W_{P}^{(l)}+H^{(r,l)}\\qquad\\qquad\\qquad\\qquad\\mathrm{bol}}\\\\ &{\\qquad\\qquad\\qquad\\mathrm{block}_{l}(H^{(r,l)})=\\mathrm{MLP}_{l}(\\mathrm{layer}_{-}\\mathrm{norm}(\\mathbf{V}^{*}))+\\mathbf{V}^{*}}\\end{array}\n$$  \n\n$\\pmb{H}^{(r,l)},\\mathbf{K}^{(r,l)},\\mathbf{Q}^{(r,l)},\\mathbf{V}^{(r,l)},\\mathbf{V}^{*}\\in\\mathbb{R}^{t\\times d_{h}}\\mathrm{~and~}\\mathbf{A}^{(r,l)}\\in[0,1]^{t\\times t}$  \n\nThe parameters are in terms of input vocabulary size $(v)$ , context size $(t)$ , number of classes $(c)$ ,hidden embedding size $(d_{h})$ , and the hidden layer size $(d_{M L P})$ of $\\mathrm{MLP}_{l}$ , which is of shape ( $d_{h}$ ,$d_{M L P},d_{h})$ .  \n\n<html><body><table><tr><td>Parameter</td><td>Value</td></tr><tr><td>U</td><td>10</td></tr><tr><td>t</td><td>81 9</td></tr><tr><td>C dh</td><td>128</td></tr><tr><td>dMLP</td><td>512</td></tr></table></body></html>  \n\nTable 4: Parameter values and counts for L1R32H4 model for symbolic Sudoku.   
\n(b) Parameter Counts   \n\n\n<html><body><table><tr><td>Operation</td><td>Parameters</td><td>ParameterCount</td></tr><tr><td>Token Embedding Positional Embedding</td><td>UX dh up x4</td><td>10× 128=1280 81 × 128 =10,368</td></tr><tr><td>Multi-HeadSelf-Attention ,W(,w) (1)M） ()M</td><td>4(d² + dn) (the dn isfor bias)</td><td>4(1282 + 128)</td></tr><tr><td>Layer normalization</td><td>3 ×2dh</td><td>= 66,048 3×2×128=768</td></tr><tr><td>MLP (dh,dMLP,dh)</td><td>dhdMLP+dMLP +dMLPdh+dh</td><td>2×128×512+512 +128 = 131,712</td></tr><tr><td>Output layer Wout</td><td>dhc</td><td>128 × 9 =1,152</td></tr></table></body></html>  \n\nAs shown in Table 4, the parameters and their counts are shown. There are a total of 211,328 parameters. For SATNet(Wang et al., 2019), the number of parameters for Sudoku is 618,000 in total. This is $(n+1+a u x)\\times m$ , where $n$ is the number of input variables, aux is the number of auxiliary variables, and mis the rank of the clause matrix. The (Palm et al., 2018) work has a total of 201,194 trainable parameters, which come from the row, column, and number embeddings, and the three MLPs used for node updates, message passing, and producing output probabilities.\n\n# BMORE A BLATION STUDIES ON SUDOKU EXPERIMENTS\nB.1 EFFECTS AND VISUALIZATION OF MULTI -H EAD A TTENTION Without prior knowledge of the Sudoku game, Recurrent Transformer learns purely from $\\langle p u z z l e,s o l u t i o n\\rangle$ pairs so that each cell should pay attention to all cells in the same row, column, and self-attention heads on the SATNet dataset with matrices – they correctly pay attention to each row, column, and box, respectively. For example, the $3\\times3$ ×box through the attention me L$\\mathcal{L}_{b a s e}$ sm. We trained an L1R32 model with 1 to 4 . 
Figure 5 ( left ) visualizes the learned attention first row of the top-left attention matrix in Figure 5 ( left ) learns purely from data about the 9 atoms to pay attention in constraint (3) where separated into different heads if the number of heads is greater than or equal to 3 and would otherwise $\\{i,\\dots,j\\}=\\{1,\\dots,9\\}$ and $d=1$ . These attentions are clearly merge. Figure 5 ( right ) compares the whole board accuracy of these models, showing that more attention heads help accelerate convergence. The accuracy may decrease if the number of attention heads is too small to capture different semantic meanings.  \n\n  \nFigure 5: ( left ) Heatmaps of the learned 81x81 attention matrices in the L1R32 Recurrent Transformer with varying numbers of heads. ( right ) Test accuracy vs. epochs for these models.',
                         'paper_title': 'Learning to Solve Constraint Satisfaction Problems with Recurrent Transformers',
                         'source_name': 'ICLR', 'year': '2023', 'chunk_id': 6,
                         'chunk_ext_id': 454848290321755592},
                     {
                         'chunk_content': '# 5. Conclusion\nAiming to reduce the costly self-attention computations, we proposed a new model called Less-Attention Vision Transformer (LaViT). LaViT leverages the computed dependency in Multi-Head Self-Attention (MHSA) blocks and bypasses the attention computation by re-using attentions from previous MSA blocks. We additionally incorporated a straightforward Diagonality Preserving loss, designed to promote the intended behavior of the attention matrix in representing relationships among tokens. Notably, our Transformer architecture effectively captures cross-token associations, surpassing the performance of the baseline while maintaining a computationally efficient profile in terms of quantity of parameters and floating-point operations per second (FLOPs). Comprehensive experimentation has confirmed the efficacy of our model as a foundational architecture for multiple downstream tasks. Specifically, the proposed model demonstrates superiority over previous Transformer architectures, resulting in state-of-the-art performance in classification and segmentation tasks.',
                         'paper_title': 'You Only Need Less Attention at Each Stage in Vision Transformers',
                         'source_name': 'CVPR', 'year': '2024', 'chunk_id': 5,
                         'chunk_ext_id': 454849516829607000},
                     {
                         'chunk_content': '# 2. Preliminaries\nIn this section we provide background and notations, much of which is borrowed from Geva et al. (2022 ).  \n\nTransformers, MLPs. Transformer-based language models typically consists of embedding and unembedding layers $E,\\dot{U}\\in\\mathbb{R}^{|\\mathcal{V}|\\times d}$ with a series of $L$ transformer layers inbetween ( Vaswani et al. ,2017 ). Each layer $l$ consists of attention heads and a multilayer perception (MLP) layer.  \n\nGiven a input sequence $\\mathbf{w}=\\langle w_{0},...,w_{t}\\rangle$ the model first $w_{i}\\in\\mathbf{w}$ ∈$E$ . We call to crea $\\mathbf{x}_{i}$ an embedding the residual stream. $\\mathbf{x}_{i}\\in\\mathbb{R}^{d}$ ∈for each token The residual stream is then updated by attention heads and MLP blocks from subsequent layers (bias terms omitted):  \n\n$$\n\\mathbf{x_{i}^{\\ell+1}}=x_{i}^{\\ell}+\\mathtt{M L P}^{\\ell}(x_{i}^{\\ell}+\\mathtt{A t t}^{\\ell}(x_{i}^{\\ell}))\n$$  \n\nWhen needed, we specify the intermittent residual stream at layer $\\ell$ (after the attention head, before the MLP) as $\\mathbf{x}^{\\ell-m i d}$ .Per Geva et al. (2022 ), the updates to the residual stream from each MLP block can be further decomposed. Namely, MLP blocks consist of two linear transformations, with point-wise activations $\\sigma$ in-between:  \n\n$$\n\\mathtt{M L P}^{\\ell}(\\mathbf{x}^{\\ell})=\\sigma\\left(W_{K}^{\\ell}\\mathbf{x}^{\\ell}\\right)W_{V}^{\\ell},\n$$  \n\nwhere $W_{K}^{\\ell}$ ,$W_{V}^{\\ell}\\in\\mathbb{R}^{d_{m l p}\\times d}$ ∈. We notate the $i$ -th row in $W_{K}$ as $\\mathbf{MLP}\\mathbf{k}_{i}^{\\ell}$ and refer to them as key-vectors, and the i -th column in $W_{V}$ ,$\\mathbf{MLP}.\\mathbf{v}_{i}^{\\ell}$ , as value-vectors (we sometimes omit “MLP” and just use ${\\bf k}_{i}^{\\ell},{\\bf v}_{i}^{\\ell})$ ).  
\n\nEquation (1 )indicates that the output of MLP blocks is the sum of its value vectors value $m_{i}^{\\ell}$ , where $\\mathbf{m}^{\\ell}:=\\sigma\\left(W_{K}^{\\ell}\\mathbf{x}^{\\ell}\\right)\\in\\mathbb{R}^{d_{m l p}}$ $\\mathbf{v}_{i}$ \x00, each scaled by a coefficient \x01:  \n\n$$\n\\mathtt{M L P}^{\\ell}(\\mathbf{x}^{\\ell})=\\sum_{i=1}^{d_{m l p}}\\sigma(\\mathbf{x}^{\\ell}\\cdot\\mathbf{k}_{i}^{\\ell})\\mathbf{v}_{i}^{\\ell}=\\sum_{i=1}^{d_{m l p}}m_{i}^{\\ell}\\mathbf{v}_{i}^{\\ell}.\n$$  \n\nPut differently, the MLP block writes to the residual stream $d_{m l p}$ times, once for each value vector. We call each of these updates a sub-update .  \n\nAll of our experiments are conducted with GPT2-medium, which has $L=24$ ,$d=1024$ , and $d_{m l p}=4096$ .  \n\nInterpreting Value Vectors in Vocabulary Space. Geva et al. (2022 ) demonstrate that for each sub-update, each value vector $\\mathbf{v}_{i}$ either promotes or suppresses the likelihood of a token $w$ from being generated:  \n\n$$\n\\begin{array}{r l}{p\\big(w\\mid\\mathbf{x}^{\\ell}+m_{i}^{\\ell}\\mathbf{v}_{i}^{\\ell},E\\big)}&{\\propto\\exp\\left(\\mathbf{e}_{w}\\cdot\\mathbf{x}^{\\ell}\\right)\\cdot\\exp\\left(\\mathbf{e}_{w}\\cdot m_{i}^{\\ell}\\mathbf{v}_{i}^{\\ell}\\right)}\\end{array}\n$$  \n\nwhere ${\\bf e}_{w}$ is the embedding of $w$ .This indicates that $\\mathbf{e}_{w}\\cdot m_{i}^{\\ell}\\mathbf{v}_{i}^{\\ell}>0$ , the likelihood of $w$ increases, while ${\\mathbf e}_{w}\\cdot m_{i}^{\\ell}{\\mathbf v}_{i}^{\\ell}<0$ ·decreases the likelihood.  \n\nFurther note that this dot product can be further decomposed. Namely, $\\mathbf{e}_{w}\\cdot\\mathbf{v}_{i}^{\\ell}$ is a “static” value that does not depend on the input: only when $\\mathbf{v}_{i}^{\\ell}$ is scaled by $m_{i}$ (which is determined by the its corresponding key vector, $\\mathbf{k}_{i}^{\\ell}$ , and the residual stream $\\mathbf{X}$ ) do we see the impact of the input towards the likelihood of $w$ .  
\n\nThus the projection tokens that get promoted by value vector $\\mathbf{r}_{i}^{\\ell}=E\\mathbf{v}_{i}^{\\ell}\\in\\mathbb{R}^{|\\mathcal{V}|}$ ∈i $\\mathbf{v}_{i}$ uces a ranking of , in which tokens with the high dot products $\\mathbf{e}_{w}\\cdot\\mathbf{v}_{i}$ are promoted most by value vector $\\mathbf{v}_{i}$ . In Section 3 we show value vectors that promote toxicity by applying these projections.\n\n# 3. Toxicity in Pre-trained Language Models\nIn this section we demonstrate how toxicity is represented and elicited in GPT2, by introducing a series of vectors that can be extracted from the language model.\n\n# 3.1. Extracting Toxic Vectors\nToxicity Probe Vector. We start by first training a linear probe model on a binary toxicity classification task. Namely, we use the Jigsaw toxic comment classification dataset ( cjadams et al. ,2017 ), which consists of 561,808 comments, each of which is labeled as toxic or non-toxic. We use a 90:10 split for training and validation. We train our probe model, $W_{\\mathrm{Toxic}}$ , on the residual stream in the last layer, averaged across all timesteps $(\\overline{{\\mathbf{x}}}^{L-1})$ :  \n\n$$\nP(\\mathrm{Toxic}|\\overline{{\\mathbf{x}}}^{L-1})=\\mathrm{softmax}(W_{\\mathrm{Toxic}}\\overline{{\\mathbf{x}}}^{L-1}),W_{\\mathrm{Toxic}}\\in\\mathbb{R}^{d}\n$$  \n\nTable 1. Top toxic vectors projected onto the vocabulary space.   \nWARNING: THESE EXAMPLES ARE HIGHLY OFFENSIVE.   \nWe note that $\\mathrm{SVD}.\\mathrm{U}_{\\mathrm{Toxic}}[2]$ has a particularly gendered nature.   \nThis arises from the dataset and language model we use.  
\n\n<html><body><table><tr><td>VECTOR</td><td>TOPTOKENS</td></tr><tr><td></td><td>c*nt,f*ck,a**hole,d*ck,wh*re,holes</td></tr><tr><td></td><td>sh*t, a**, cr*p, f*ck, c*nt, garbage, trash</td></tr><tr><td></td><td>delusional,hypocritical,arrogant,nonsense</td></tr><tr><td>MLP.V2669 18</td><td>degener, whining, idiots, stupid, smug</td></tr><tr><td>MLP.v668 13</td><td>losers, filthy, disgr, gad, feces, apes, thous</td></tr><tr><td>MLP.V255 16</td><td>disgrace, shameful, coward, unacceptable</td></tr><tr><td>MLP.v882 12</td><td>f*ck, sh*t, piss, hilar, stupidity, poop</td></tr><tr><td>MLP.V1438 19</td><td>c*m, c*ck, orgasm, missionary, anal</td></tr><tr><td>SVD.UToxic[O]</td><td>a**, losers,d*ck, s*ck, balls, jack, sh*t</td></tr><tr><td>SVD.UToxic[1]</td><td>sexually, intercourse, missive, rogens, nude</td></tr><tr><td>SVD.UToxic[2]</td><td>sex,breasts,girlfriends,vagina,boobs</td></tr></table></body></html>  \n\nOur probe vector achieves an accuracy of $94\\%$ on the validation split. We view our toxic probe vector $W_{\\mathrm{Toxic}}$ as an aggregate of all the relevant signals in the language model to classify an input as toxic.  \n\nToxic Vectors in MLP Blocks. Given our probe vector $W_{\\mathrm{Toxic}}$ , we can use it to find weights within the language model that promote toxicity. Namely, Geva et al. (2022 )demonstrate that value vectors promote tokens at a conceptlevel. Given this, we search for value vectors that promote toxicity, by checking for all value vectors with the highest cosine similarity with $W_{\\mathrm{Toxic}}$ . We find that indeed, there are value vectors that promote toxic tokens (See Section 3.2 ). We notate our set of toxic value vectors as $\\mathbf{MLP.v_{Toxic}}$ and their corresponding key vectors as $\\mathbf{MLP.k_{\\mathrm{Toxic}}}$ .  
\n\nWe provide two perspectives of our $\\mathbf{MLP.v_{Toxic}}$ vectors: 1) when triggered, they promote the likelihood of toxic tokens to be generated, and 2) $\\mathbf{MLP.v_{Toxic}}$ are vectors within the model that contribute towards the $W_{\\mathrm{Toxic}}$ direction.  \n\nSVD: Decomposed Toxic Vectors. After extracting a set of $\\mathrm{{N}\\,(=\\!128)^{2}\\,M L P.v_{T o x i c}}$ vectors, we stack them into a $N{\\times}d$ matrix. We then apply singular value decomposition to get decomposed singular value vectors $\\mathrm{SVD}.\\mathrm{U}_{\\mathrm{Toxic}}$ . We refer to the $i$ -th singular value vector as $\\mathrm{SVD.U_{Toxic}}[i]$ . We view $\\mathrm{SVD.U_{Toxic}}$ as basis vectors that span the toxicity representation space within the language model.',
                         'paper_title': 'A Mechanistic Understanding of Alignment Algorithms: A Case Study on DPO and Toxicity',
                         'source_name': 'ICML', 'year': '2024', 'chunk_id': 1,
                         'chunk_ext_id': 454895308341250416},
                     {
                         'chunk_content': '# LINEAR LOG -N ORMAL A TTENTION WITH UNBIASED CONCENTRATION\nA P REPRINT  \n\nYury Nahshan, Joseph Kampeas and Emir Haleva Huawei, Tel-Aviv Research Center, Israel Email: {first.last}@huawei.com\n\n# A BSTRACT\nTransformer models have achieved remarkable results in a wide range of applications. However, their scalability is hampered by the quadratic time and memory complexity of the self-attention mechanism concerning the sequence length. This limitation poses a substantial obstacle when dealing with long documents or high-resolution images. In this work, we study the self-attention mechanism by analyzing the distribution of the attention matrix and its concentration ability. Furthermore, we propose instruments to measure these quantities and introduce a novel self-attention mechanism, Linear Log-Normal Attention, designed to emulate the distribution and concentration behavior of the original self-attention. Our experimental results on popular natural language benchmarks reveal that our proposed Linear Log-Normal Attention outperforms other linearized attention alternatives, offering a promising avenue for enhancing the scalability of transformer models. Our code is available in supplementary materials.\n\n# 1 Introduction\nTransformer models, proposed by (Vaswani et al., 2017), have become widely used deep learning architectures that have achieved state-of-the-art performance in various fields, including Natural Language Processing (NLP) (Brown et al., 2020; Devlin et al., 2018), Computer Vision (CV) (Dosovitskiy et al., 2020), Neural Machine Translation (NMT) (Chen et al., 2018), Document Summarization (Zhang et al., 2019; Pilault et al., 2020), and Protein Structure Prediction (Bahdanau et al., 2015). The main component of the Transformer model is an attention mechanism that identifies complex dependencies between tokens and efficiently captures tokens’ correlation. 
However, standard self-attention ffers from quadratic memory and computation complexity, which arises from the $N\\times N$ attention matrix, where $N$ is the sequence length. This problem is particularly significant during training, as it requires storing the attention matrix for gradient computation. Consequently, this significantly hinders the training of Transformer models with long sequences.  \n\nRecently, we have observed an increasing interest in training Transformer models with long sequences, especially when considering large language models (Scao et al., 2022; Zhang et al., 2022; Chowdhery et al., 2022). Various approaches address the quadratic memory issue in self-attention. One class of the methods is sparse-attention, which aims to perform only a subset of the attention computations while preserving the softmax function (Child et al., 2019; Zaheer et al., 2020). Another approach is Linearized Attention (LA), which replaces the softmax with a product of two functions (Choromanski et al., 2020; Katharopoulos et al., 2020). These methods reduce the computational and memory complexity of the attention mechanism while striving to maintain performance. One of LA’s benefits is that it performs dense operations and does not require special HW or low-level implementation. However, despite their efficiency, LA methods often underperform compared to standard self-attention. Thus, understanding the reasons behind the superior performance of self-attention is crucial for designing an effective LA method.  \n\nIn this paper, we proposed a systematic way to develop an LA method with comparable performance to the Softmax Attention (SA). First, we defined a holistic model of the SA and proceeded to examine its characteristics. Our analysis of the SA model was carried out from three different perspectives, focusing on its statistical, informational, and algebraic properties. 
In particular, we characterized the probability distribution of the attention matrix and proved its log-normal nature. We studied the concentration behavior of the SA and analyzed it in terms of the entropy and spectral gap (Coste, 2017). Based on the proposed model of the SA, we developed an LA method that emulates the distribution and concentration behavior of the SA, achieving comparable performance. Finally, we evaluated the effectiveness of our method on popular NLP benchmarks and compared it with other state-of-the-art methods. In summary, our contribution is as follows:  \n\n•We conducted an in-depth analysis of self-attention, characterizing its statistical, informational, and algebraic properties. Based on the entropy and the spectral gap metrics, we developed tools suitable for studying the concentration behavior of the attention and applied them to the SA.   \n•Using the developed model and proposed tools, we designed Linear Log-Normal Attention (LLN Attention) with comparable performance to SA while requiring linear memory and computational complexity in the sequence length.\n\n# 2 Background and related work\nIn this section, we present a brief overview of the attention mechanism and various methods for efficient and linearized attention. We review the most relevant works in the field, classifying them into different types of attention methods such as sparse attention, low-rank projection, memory-based, kernel-based approximations, and clustering-based methods.\n\n# 2.1 Background on Self-Attention\nIn the seminal study of (Bahdanau et al., 2015), the authors proposed the attention mechanism, which was subsequently incorporated into the Transformer model (Vaswani et al., 2017). Since then, attention has become a fundamental building block for many Transformer-based models.  
\n\nConsider a sequence of $N$ tokens, where each token represented by $d$ -dimensional query, key, and value vectors, denoted as $\\{\\pmb{q}_{i}\\}_{i=1}^{N}$ ,$\\{\\pmb{\\dot{k}}_{i}\\}_{i=1}^{N}$ , and $\\{{\\pmb v}_{i}\\}_{i=1}^{N}$ , respectively. The SA is defined as:  \n\n$$\n\\mathrm{Attn}(\\pmb{q}_{i},\\{k_{1},\\ldots,k_{N}\\},\\{\\pmb{v}_{1},\\ldots,\\pmb{v}_{N}\\})=\\frac{\\sum_{j=1}^{N}\\kappa_{\\mathrm{sm}}(\\pmb{q}_{i},\\pmb{k}_{j})v_{j}^{\\top}}{\\sum_{l=1}^{N}\\kappa_{\\mathrm{sm}}(\\pmb{q}_{i},\\pmb{k}_{l})}\n$$  \n\nwhere $\\kappa_{\\mathrm{sm}}$ is an exponential kernel used in the softmax function:  \n\n$$\n\\kappa_{\\mathrm{sm}}(\\pmb{q}_{i},\\pmb{k}_{j})=e^{\\frac{\\pmb{q}_{i}^{\\top}\\pmb{k}_{j}}{\\sqrt{d}}}\n$$  \n\nThe recent study by (Wright and Gonzalez, 2021) has examined SA from the perspective of the kernel method. Notably, the formulation of SA in eq. (1) can be seen as Nadaraya-Watson kernel regression (Nadaraya, 1964), where estimating some unknown function with joint distribution $p(\\pmb{k},\\pmb{v})$ and density $p(\\pmb{k})$ with a kernel (Han et al., 2022). Moreover, as shown by (Tsai et al., 2019), other popular kernels, such as polynomial or Radial Basis Function (RBF), can be used instead of the exponential kernel. However, the performance may vary depending on the type of kernel used. A kernel method perspective of the attention allows us to address the problem of attention linearization by using the connection between any kernel and its feature embedding function $\\Phi$ , described by Mercer’s theorem (Mercer, 1909):  \n\n$$\n\\kappa(\\pmb{q}_{i},\\pmb{k}_{j})=\\langle\\Phi(\\pmb{q}_{i}),\\Phi(\\pmb{k}_{j})\\rangle\n$$',
                         'paper_title': 'Linear Log-Normal Attention with Unbiased Concentration',
                         'source_name': 'ICLR', 'year': '2024', 'chunk_id': 0,
                         'chunk_ext_id': 454845985266381918}],
                 content="In this chapter, we will delve into the architecture and principles of Large Language Models (LLMs). As mentioned earlier, the previous chapter introduced the rise of LLMs and their challenges in planning capabilities. Understanding the architecture and principles of LLMs is crucial for addressing these challenges and enhancing their planning abilities. According to the literature, LLMs are primarily based on the Transformer model, which utilizes the self-attention mechanism to capture long-range dependencies in sequences<sup>20</sup>. The Transformer architecture consists of embedding and unembedding layers, with a series of transformer layers in between, each containing attention heads and an MLP layer<sup>27</sup>. The self-attention mechanism is built on the notion of queries (Q), keys (K), and values (V), which are linear projections of the token embeddings<sup>20</sup>. This mechanism allows the model to learn long-range interactions across elements in a sequence, enabling it to capture complex dependencies between tokens<sup>28</sup>. Furthermore, the Transformer model incorporates positional embeddings to inject position information into the attention mechanism, addressing the limitation of handling longer inputs<sup>21</sup>. Studies show that the self-attention mechanism in Transformers has quadratic time and memory complexity, limiting their scalability for long sequences<sup>28</sup>. To address this issue, various approaches have been proposed, such as sparse attention, linearized attention, and approximation methods<sup>22</sup>. These approaches aim to reduce the computational cost while maintaining the performance of the model. In summary, the architecture and principles of LLMs, particularly the Transformer model and the self-attention mechanism, play a crucial role in capturing long-range dependencies and enabling complex reasoning. 
Understanding these concepts is essential for addressing the challenges in LLMs' planning capabilities and enhancing their performance.")

    # Top-level chapter section "3": methods for improving LLM planning ability.
    # NOTE(review): `reference` is an empty string here, while the surrounding
    # sections pass a list of chunk dicts — presumably Section accepts both
    # forms; confirm against the Section/Outline definition.
    s7 = Section(section_identifier="3", title="提升LLMs规划能力的方法",
                 description='全面探讨了提升LLMs规划能力的方法，包括基于提示学习、知识蒸馏、世界模型、对抗训练和元学习的方法。',
                 reference='',
                 content="""The chapter "提升LLMs规划能力的方法" delves into the realm of enhancing the planning capabilities of large language models (LLMs). This chapter is of paramount importance as LLMs, despite their remarkable advancements in natural language processing, often face challenges in planning due to their lack of internal world models and difficulties in multi-step reasoning and decision-making.

To address these challenges, the chapter explores various methods, including prompt-based learning, knowledge distillation, world models, adversarial training, and meta-learning. These approaches offer promising solutions for improving LLMs' planning abilities, enabling them to tackle complex tasks with greater efficiency and effectiveness.

In the subsequent subsections, we will delve into the details of each method, providing a comprehensive overview of their principles, types, and applications. We will explore the intricacies of prompt-based learning, including zero-shot and few-shot learning, prompt engineering, and template design. Additionally, we will examine the principles and types of knowledge distillation, focusing on distance metrics and energy-based models.

By delving into these methods, we aim to offer new insights and solutions for the research and application of enhancing LLMs' planning capabilities. Through a thorough examination of the current state, identification of challenges, and exploration of potential solutions, this chapter seeks to contribute to the advancement of LLMs and their ability to tackle complex planning tasks.""")
    # Subsection "3_1": prompt-learning-based methods. The `content` literal is
    # pre-written survey text (with markdown headings) used as fixture data;
    # it is consumed verbatim downstream, so its wording must not be edited.
    s8 = Section(section_identifier="3_1", title="基于提示学习的方法",
                 description='介绍了基于提示学习的方法，包括零样本和少样本学习、提示工程和模板设计，以及基于检索的提示学习方法。',
                 reference='',
                 content="""The chapter "基于提示学习的方法" delves into the realm of prompt-based learning, a crucial approach for enhancing the planning capabilities of large language models (LLMs). This method is particularly significant as LLMs, despite their remarkable advancements in natural language processing, often face challenges in planning due to their lack of internal world models and difficulties in multi-step reasoning and decision-making.

### Zero-shot and Few-shot Learning

The sub-chapter "零样本和少样本学习" explores zero-shot and few-shot learning, which are pivotal for addressing these challenges. Zero-shot learning aims to solve unseen tasks without labeled training examples, while few-shot learning leverages a small amount of task-specific training data to improve training data efficiency in low-data regimes. These approaches are particularly relevant for LLMs, enabling them to learn and adapt to new tasks with limited or no explicit training data.

One notable method in zero-shot learning is MICRE, which introduces a new approach where an LLM is meta-trained to learn to in-context learn relations. MICRE has demonstrated competitive results compared to current state-of-the-art task-specific models and outperforms strong baselines, including supervised fine-tuning and in-context learning without metatraining methods. This highlights the potential of zero-shot learning in enhancing LLMs' planning capabilities.

In the realm of few-shot learning, prompt-based methods have shown promising results. These methods leverage in-context learning (ICL) with few-shot input-label exemplars as demonstrations, enabling LLMs to quickly learn tasks based on limited examples. However, zero-shot learning without task-specific data can lead to significantly degraded performance, prompting the need for instruction tuning with a multi-task paradigm to pre-train LLMs.

To further improve zero-shot learning, recent research has focused on dataset generation. Despite the synthesized data often containing low-quality samples and missing nuanced semantics, this approach holds potential for enhancing LLMs' planning capabilities. Additionally, active learning (AL) has emerged as a technique to reduce labeling effort by selecting the most useful examples to annotate. The FreeAL framework, which employs LLMs as weak annotators and SLMs to distill task-related knowledge from noisy annotations, demonstrates the effectiveness of combining active learning with LLMs.

### Prompt Engineering and Template Design

The sub-chapter "提示工程和模板设计" delves into the topic of prompt engineering and template design. These techniques are crucial for guiding LLMs to generate effective plans, especially in few-shot or zero-shot learning scenarios. Prompt engineering and template design involve crafting prompts and templates that provide LLMs with context, examples, and instructions to facilitate better understanding and generation of plans.

One effective approach to prompt engineering is the use of in-context learning, which involves providing a small number of examples to prompt an LLM without requiring fine-tuning for different tasks. This technique has been shown to be critical in various LLM applications, including embodied agents and visual navigation tasks. For instance, LLM-Planner utilizes a structured prompt design consisting of six parts: task introduction, goal instruction, step-by-step instruction, plan list, object list, and retrieval message. This design enables LLM-Planner to generate complete and high-quality high-level plans grounded in the current environment with a fraction of labeled data.

Another important aspect of prompt engineering is the design of program-like prompts. For example, PROGPROMPT proposes a program-like prompt for LLMs to generate executable plans for robotic agents. However, this approach assumes ground-truth objects and affordances, limiting the number of in-context examples. To address this limitation, LLM-Planner grounds the LLM to the current environment using only a pre-trained vision model, directly predicting high-level plans without relying on a list of admissible actions. This approach allows for dynamic adaptation of high-level plans during task execution, achieving competitive end-to-end performance on the ALFRED benchmark with long-horizon tasks.

In conclusion, prompt engineering and template design play a vital role in enhancing LLMs' planning capabilities. By providing context, examples, and instructions through carefully crafted prompts and templates, LLMs can generate more effective plans, especially in few-shot or zero-shot learning scenarios. As we continue to explore and refine these techniques, we can further unlock the potential of LLMs in various planning tasks.""")
    s9 = Section(section_identifier="3_1_1", title="零样本和少样本学习",
                 description='阐述了零样本和少样本学习的原理和方法，以及它们在LLMs规划能力中的应用。',
                 reference=[
                     {
                         'chunk_content': '# 5 Conclusion\nIn this work, we introduce M ICRE , a new zero and few-shot learning method where an LLM is meta-trained to learn to in-context learn relations, i.e. condition on training examples to recover the new relation semantics and make predictions. MICRE outperforms a range of strong baselines including supervised fine-tuning and in-context learning without metatraining methods. Besides, it achieves competitive results compared to current state-of-the-art task-specific models. We also analyze the advantages and limitations of M ICRE , encouraging more effective methods in the future research.\n\n# Acknowledgments\nWe thank the reviewers for their insightful comments. This work was supported by National Science Foundation of China (Grant Nos.62376057) and the Start-up Research Fund of Southeast University (RF1028623234). All opinions are of the authors and do not reflect the view of sponsors.',
                         'paper_title': 'Meta In-Context Learning Makes Large Language Models Better Zero and Few-Shot Relation Extractors',
                         'source_name': 'IJCAI', 'year': '2024', 'chunk_id': 7,
                         'chunk_ext_id': 454845601629615112},
                     {
                         'chunk_content': '# 2 Related Work\nThis work mainly explores the zero-shot learning capability of ChatGPT on a diverse collection of datasets including reasoning and classic NLP tasks. In light of this, we review three lines of research that form the basis of this work: large language models, zero-shot learning, and chain-of-thought prompting for reasoning.\n\n# 2.1 Large Language Models\nEver since Radford et al. (2019 ); Brown et al. (2020 ) demonstrated that language models can perform a variety of tasks without any gradient updates by providing the model with a textual instruction ( zero-shot ) and/or a few examples ( fewshot ), a great deal of work has focused on developing better large language models (LLMs). One line of work has aimed to explore the benefits of scaling up LLMs, including Megatron-turing NLG (Smith et al. ,2022 ) with 530 billion parameters, Gopher ( Rae et al. ,2021 ) with 280 billion parameters, and PaLM Chowdhery et al. (2022b ) with 540 billion parameters. The benefits of this scale have born out on stronger performance on more difficult tasks, e.g. the finding that PaLM outperformed average humans on the challenging BIGbench benchmark ( Srivastava et al. ,2022 ). These LLMs also form the basis of better models, such as Minerva ( Lewkowycz et al. ,2022 ) which achieved state-of-the-art performance on various technical benchmarks. Rather than scaling up model size alone, a separate line of research aims to attain better performance with smaller models through longer training ( Hoffmann et al. ,2022 ) or alternative objectives Tay et al. (2022 ). One particularly fruitful direction has been training LLMs with supervision ( Sanh et al. ,2021b ;Wei et al. ,2021 ;Mishra et al. ,2022 ;Chung et al. ,2022 )and/or human feedback ( Ouyang et al. ,2022 ). 
The strong performance of LLMs has led to a significant amount of work analyzing the abilities and behaviors of LLMs ( Webson and Pavlick ,2022 ;Qin and Joty ,2022b ;Min et al. ,2022 ;Liang et al. ,2022 ;Qin et al. ,2023a ,b).\n\n# 2.2 Zero-Shot Learning\nZero-shot learning aims to solve unseen tasks without labeled training examples. It results in a big challenge for models as they typically rely on large amounts of training data. Prior methods to solve zero-shot learning can be mainly divided into two categories: ( i )model-based methods focused on how to directly learn a model for unseen samples ( Fu et al. ,2017 ;Wang et al. ,2018 ); and (ii )instance-based methods tried to obtain labeled instances for unseen tasks to improve model learning ( Zhang et al. ,2017 ;Ye and Guo ,2017 ;Qin and Joty ,2022a ). More recent work has demonstrated the superiority of LLMs for zero-shot learning ( Brown et al. ,2020 ;Wei et al. ,2021 ;Chowdhery et al. ,2022b ). The most recent breakthrough of LLMs is the debut of ChatGPT, which has shown amazing ability in various aspects related to dialogue. Going a step further, we explore the zeroshot learning capability of ChatGPT on different tasks beyond dialogue in this work.\n\n# 2.3 Chain-of-Thought Prompting\nChain-of-thought (CoT) prompting induces LLMs to generate intermediate reasoning steps before answering ( Wei et al. ,2022 ). According to whether there are manual demonstrations, current CoT prompting methods can be divided into two main categories: manual-CoT and zero-Shot-CoT. In manual-CoT, LLMs perform CoT reasoning with manually designed demonstrations ( Wei et al. ,2022 ). Least-to-most prompting ( Zhou et al. ,2022 ) decomposed complex problems into subproblems and then sequentially solved the subproblems. Wang et al. (2022b ) introduced selfconsistency to sample multiple reasoning paths, and then conducted a majority vote to determine the final answer. To generate more diverse outputs, Li et al. 
(2022a ) and Wang et al. (2022a ) explored applying randomness in the input space. In zero-Shot-CoT, Kojima et al. (2022 ) demonstrated that LLMs are decent zero-shot reasoners by leveraging self-generated rationales. The effectiveness of self-generated rationales was also verified by STaR ( Zelikman et al. ,2022 ), which enabled the model to self-improve through its own generated rationales. Zhang et al. (2023a ) proposed AutoCoT to automatically generate rationales from test questions. Most recent studies mainly focused on how to improve manual-CoT, including optimizing the demonstration selection ( Rubin et al. ,2022 ;Fu Figure 2: Instructions and input formats of six different categories of tasks (sentiment analysis, natural language inference, named entity recognition, question answering, dialogue, and summarization). The task instructions are taken from or inspired by Brown et al. (2020 ), Ouyang et al. (2022 ), Zhang et al. (2023a ) and Ding et al. (2022 ). We color the instructions in blue . After reading the entire input (circled by the green dashed box), the model generates an answer.  \n\n  \n\n  \nFigure 3: Illustration of reasoning tasks. We show the instruction of AQUA-RAT ( Ling et al. ,2017 ) in this figure. Other reasoning tasks have similar instructions, e.g., “The answer (arabic numerals) is ” for MultiArith ( Roy and Roth ,2015 ). Note that we also conduct zero-shot chain-of-thought (zero-shot-CoT) experiments with ChatGPT and GPT-3.5 for reasoning tasks (right part).  \n\net al. ,2022 ;Lu et al. ,2022b ;Qin et al. ,2023c ) and optimizing the quality of reasoning chains ( Khot et al. ,2022 ;Chen et al. ,2022 ;Zhao et al. ,2023 ). In addition, researchers also studied the feasibility of adopting CoT in multilingual scenarios ( Shi et al. ,2022 ) and in smaller language models ( Magister et al. ,2022 ;Ho et al. ,2022 ). More recently, Zhang et al. 
(2023b ) proposed Multimodal-CoT that incorporates vision features in CoT reasoning, with the model under 1 billion parameters outperforming GPT-3.5 by $16\\%$ and even surpassing human performance on the ScienceQA benchmark ( Lu et al. ,2022a ).',
                         'paper_title': 'Is ChatGPT a General-Purpose Natural Language Processing Task Solver?',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845633511047146},
                     {
                         'chunk_content': '# 1 Introduction\nLarge language models (LLMs) have revolutionized the field of NLP, yielding impressive performance on various conventional natural language understanding (NLU) and generation (NLG) tasks. They are able to do this with only a handful ( i.e., few-shot) or sometimes even no training examples (i.e., zero-shot) ( Brown et al. ,2020 ;Du et al. ,2022 ;  \n\n  \nFigure 1: Existing LLM-based data augmentation needs unlabeled examples (labeler) or label-specific prompts (generator), while our framework generates examples for a variety of tasks in a unified way.  \n\nRae et al. ,2021 ;Thoppilan et al. ,2022 ;Chowdhery et al. ,2022 ). However, despite their effectiveness, there is a continued demand for the deployment of smaller trainable or tunable models in real-world scenarios due to cost constraints, existing servicelevel agreement response times, or privacy and security concerns around using black-box APIs. Unfortunately, application-specific custom models sometimes require large amounts of high-quality human-labeled data, in order to perform well. Thus, in order to reduce time and cost in the model deployment cycle, recent work has focused on trying to obtain training data by leveraging LLMs as either labelers to annotate unlabeled data ( Yoo et al. ,2021 ;Wang et al. ,2021a ;Lang et al. ,2022 ), or generators to generate new data samples ( Meng et al. ,2022 ;Ye et al. ,2022 ;Gao et al. ,2022 ).  \n\nDespite initial successes, constraints for these techniques continue to hinder their applicability in broader real-world settings. First, in the context of using LLMs as labelers , it is essential to have raw data that closely resembles the distribution of data in the predictive task. Most previous research has assumed access to a training dataset from which the labels are elided; however, for cold-start problems in the real-world, no such assumptions can be made. 
Curating raw data for tasks in specialized domains, such as those in the biomedical or legal fields, can be particularly challenging. Conversely, sampling a large volume of data at random can result in an imbalanced label distribution due to rare events ( Markov et al. ,2022 ).  \n\nMeanwhile, leveraging LLMs as generators requires careful curation of few-shot examples ( Hartvigsen et al. ,2022 ), or composition of prompts that highlight the semantic meaning of labels ( Wang et al. ,2021b ;Meng et al. ,2022 ;Ye et al. ,2022 ;Gao et al. ,2022 ), such as positive v. negative in sentiment classification. The latter has been a bottleneck to the broader applicability of LLMs as generators, however, since not all tasks have labels that are semantically meaningful, or are enumerable. Consider, for example the label yes v. no , which have no meaning when taken without context; or the options of a multiple choice QA (see Figure 1 ), which are an effectively open-ended label-set that varies from instance to instance. For these kinds of problems LLMs as generators continue to be inadequate.  \n\nIn this paper, we first present a formal framework for characterizing different approaches for LLM data creation. Specifically, we use graphical models as a way to characterize and unify disparate approaches that include LLMs as either labelers or generators (Section 2 ). Next, we propose a novel data creation pipeline that only requires a single formatting example to generate heterogeneous labeled data for various downstream applications, including those that focus on specialized domains. In contrast to current methods that require dataset-specific components ( e.g., label description, example selection), our pipeline serves as a unified solution that can be applied to a wide range of tasks, including those where the label set is either semantically devoid of meaning, or unenumerable.  
\n\nOur data creation pipeline leverages an instruction-following LLM as a generator in conjunction with a single formatting example as a simple yet effective way of imposing structured constraints. Specifically, our approach iteratively conditions the generator on an instruction and a unique formatting example in a JSON format to yield multiple examples that vary in content but are formatted uniformly (Section more, as an efficient means of diversifying the gen3.1 −3.2 ). Furthererated data, we propose a “self-reference” strategy, which iteratively samples from the pool of newly created examples to seed the prompt for the next round of generation (Section 3.4 ). Specifically, we outline 4 distinct instantiations of “self-reference” including random ,contrastive ,similar , and tree sampling for controlled diversification of data.  \n\nWe evaluate our data creation pipeline on a battery of tests involving three distinct types of tasks, namely multiple-choice question answering (QA), open-book yes/no QA, and closed-book yes/no QA. The datasets for these tasks range across a variety of domains, including specialized ones such as the biomedical domain. Furthermore, for each category of task, we use a minimum of two datasets in order to compare the out-of-distribution (OOD) generalization of models using original data to synthetically generated LLM data. Our results demonstrate that leveraging LLMs as generators using our formatting-based creation approach is a highly costeffective way of creating data that can be effectively used to train models for a variety of downstream tasks, including those in specialized domains, and ones where labels are devoid of semantic meaning or vary across the data. For in-distribution (ID) settings, naturally having access to large amounts of high-quality manually curated and labeled data is still ideal. 
However, when only a small amount of human-labeled data is available, our approach yields results that are often comparable, and sometimes even better than the original datasets. This highlights the potential role LLMs can play in the model development cycle, especially in resourcepoor or specialized domains. Further, for the OOD settings, models trained on data generated by our pipeline consistently, and by large margins, outperform their counterparts trained on data from human sources. This robustness and generalizability has important implications for the deployment of realworld systems that deal with data that are variable, chaotic and often very different from curated academic datasets. We are realeasing our code and prompts to the community to spur future research in the area 1 .',
                         'paper_title': 'Making Large Language Models Better Data Creators',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845707977776288},
                     {
                         'chunk_content': '# 3.4 BLI-Oriented Fine-Tuning\nThis work predominantly focuses on ‘learningless’ experiments based on zero-shot and few-shot incontext setups with off-the-shelf mLLMs for BLI without any fine-tuning. As a side experiment, we also aim to fine-tune smaller-scale mLLMs, making them specialise into few-shot word translators with our few-shot prompts as input. Our training set is $w^{x}$ litself as an in-context example. We combine $\\mathcal{D}_{S}$ , but we now exclude retrieving an input the $\\mathcal{D}_{S}$ of each language pair with which we finetune encoder-decoder mLLMs with mT5’s spancorruption loss and fine-tune decoder-only LLMs with the standard causal LM objective.',
                         'paper_title': 'On Bilingual Lexicon Induction with Large Language Models',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 3,
                         'chunk_ext_id': 454845683117609890},
                     {
                         'chunk_content': '# 2 TASK FORMULATION\nIn this section, we first introduce LLM-based recommender models Formally, given the user sequences of the training set and uncover the challenge of real-world applicability. Thereafter, we U} formulate the task of data pruning for LLM-based recommendation , the target is to \uffffne-tune an LLM for recommendation tasks. The learnable parameters ( and compare the related work on coreset selection. ) of an LLM is optimized by minimizing on input tent capabilities of LLMs, LLM-based recommendation typically LLM-based recommender models. GTo leverage the compeutilize powerful LLMs directly as the recommender models. Since ’LLMs are not particularly trained on the recommendation data, fine-tuning is the necessary and key step for LLMs to learn the item knowledge and understand user behavior. Let the sets of users and items, respectively. We present each training Us the I denote is the user’s historical interactions in chronological order, and While \uffffne-tuning LLMs h $s=(x,y)$ ()strated $x=[i_{1},i_{2},...,i_{|x|}]$ |$y$ |To achieve e\uffffcient LLM-based recommendation, a the high resource costs required by LLMs and the •Formally, given the user sequences of the training set learnable parameters ( the negative log-likelihood of the next interacted item U} Data pruning for , the target is to fine-tune an LLM for recommendation tasks. The $(\\phi\\in\\Phi)$ ∈) of an LLM is optimized by minimizing nt LLM-based recommendation. 
{$\\left\\{i_{1},.\\;.\\;.\\;,i_{|x|},y\\right\\}\\subset{\\cal{J}}$ $y$ D$\\mathcal{D}=\\{s_{u}|u\\in\\}$ romising conditioned ||{} ⊂I |∈.approach on input 𝑥s to reduce the costs by few-shot \uffffne-tuning with :  \n\n$$\n\\operatorname*{min}_{\\phi\\in\\Phi}\\{\\mathcal{L}_{\\phi}^{L L M}=-\\sum_{t=1}^{|y|}\\log P_{\\phi}(y_{t}|y_{<t},x)\\},\n$$might lose some crucial information for LLMs to acquire the latest  \n\nrecom where sequence preceding $y_{t}$ ndation, which aims to id denotes the $y_{t}$ -th token of .𝑦, and tify $y_{<t}$ t of representative represents the token  \n\nWhile fine-tuning LLMs has demonstrated effectiveness in recommendation tasks [ 35 ], its practical application is hindered by the high resource costs required by LLMs and the continuous influx of new recommendation data [ 41 ]. Hence, it is essential to enhance the efficiency of LLM-based recommender fine-tuning.  \n\nTo achieve efficient LLM-based recommendation, a promising •Data pruning for efficient LLM-based recommendation. approach is to reduce the costs by few-shot fine-tuning with randomly selected samples [4]. Nevertheless, the random samples might lose some crucial information for LLMs to acquire the latest information on user behavior or items, e.g., trending items. In this light, we introduce the task of data pruning for efficient LLM-based recommendation, which aims to identify a set of representative samples particularly for LLMs’ few-shot fine-tuning. Formally, given all training samples pruning is the su size of Set is controlled by the given selection ratio Sselect a subset can yield good performance on the testing set. The S ⊂D $\\mathcal{D}\\;=\\;\\{s_{u}|u\\;\\in\\;\\mathcal{U}\\}$ , such that the LLMs trained on , the target of data $r$ ,i.e., $|S|=r|{\\mathcal{D}}|$ •to this data pruning task, coreset selection methods generally fall Retrospect of coreset selection. 
As the closely related work into two groups:  \n\n1) Heuristic methods [ 8 ,12 ,47 ] typically design some heuristic strategies to select samples based on an empirical minimizer:  \n\n$$\n\\boldsymbol{S}=H(\\boldsymbol{\\hat{\\theta}},\\mathcal{D}),\\quad\\mathrm{s.t.}\\quad\\boldsymbol{\\hat{\\theta}}=\\underset{\\boldsymbol{\\theta}\\in\\Theta}{\\arg\\operatorname*{min}}\\~\\mathcal{L}(\\boldsymbol{\\theta},\\mathcal{D}),\n$$  \n\nwhere sification [ $\\mathcal{L}(\\cdot)$ 19 is the loss function of the task, ] or CTR prediction [ 17 ], and $H(\\cdot)$ e.g., denotes the image clasheuristic strategy such as selecting samples with larger prediction entropy [ 8 ], or clustering the samples based on the sample representations [ 6 ]. However, this group of methods designs the strategy $H(\\cdot)$ intuitively and fails to explicitly consider the influence of a sample on the empirical risk. This might lead to suboptimal selection, thereby declining the performance of the model trained by the selected subset.  \n\n2) Optimization-based methods [ 5 ,26 ,27 ,52 ] mainly utilize bilevel optimization techniques to learn the best subset chosen for training:  \n\n$$\n\\boldsymbol{S}^{*}=\\underset{\\boldsymbol{S}\\subset\\mathcal{D}}{\\arg\\operatorname*{min}}\\ \\mathcal{L}(\\boldsymbol{\\hat{\\theta}},\\mathcal{D}),\\quad\\mathrm{s.t.}\\quad\\boldsymbol{\\hat{\\theta}}=\\underset{\\boldsymbol{\\theta}\\in\\Theta}{\\arg\\operatorname*{min}}\\ \\mathcal{L}(\\boldsymbol{\\theta},\\boldsymbol{S}).\n$$  \n\nBesides, there is also some work that employs discrete optimization problems based on the empirical minimizer $\\hat{\\theta}$ in Eq. (2). Nevertheless, they struggle to be applied to large-scale datasets e.g., recommendation data, due to the complex solving of the optimization problem [20].  \n\nFurthermore, as shown in Eq. 
(2-3), previous coreset selection methods usually require the model to be trained over original training samples recommender models due to the continuous influx of data and the $\\mathcal{D}$ , which however is infeasible for LLM-based high resource costs of LLMs ( cf. Section 1).  \n\n•for data pruning: 1) Drawing upon the above insights, we consider two objectives high accuracy emphasizes the low empirical risk of the model trained on the selected samples, and 2) high efficiency focuses on the low costs of the data pruning process, breaking free from the heavy fine-tuning of LLMs for data pruning.  \n\n  \nFigure 2: Overview of DEALRec. DEALRec first trains a surrogate model on the full training samples. Subsequently, it calculates the influence score, which is then regularized by the effort score, to identify influential samples.',
                         'paper_title': 'Data-efficient Fine-tuning for LLM-based Recommendation',
                         'source_name': 'SIGIR', 'year': '2024', 'chunk_id': 2,
                         'chunk_ext_id': 454846586172820280},
                     {
                         'chunk_content': '# 4.2 Benchmarks\nWe use the datasets described in Sec. 3.3 for training. Their test splits are used for evaluation ( RES dataset ,GRES ,VQA ). In addition, we use ReasonSeg [27] as a zero-shot evaluation for segmentation from complex reasoning scenarios. This comprehensive evaluation assesses the versatility and effectiveness of our model across a diverse range of referring expression segmentation scenarios.  \n\nIt is worth mentioning that, compared to other MLLM-based methods, our approach uses significantly less training data. A detailed comparison is presented in Tab. 1. For instance, GLaMM [42] is trained using the GranD [42] dataset, which has 11M images and 810M object masks. Its annotations are collected through various vision and language models including GPT-4-based rewrites of existing open-source datasets. In contrast, our SAM4MLLM only uses a small amount of mask annotation data (100K images, 82K object masks) to enable MLLMs to learn general information, but can produce high-quality segmentation masks in conjunction with SAM.  \n\nTable 1: Comparison of the training data from different methods. SAM4MLLM uses less training data than other MLLM-based methods, especially in terms of the number of masks.   
\n\n\n<html><body><table><tr><td>Method</td><td>Train set w/ mask</td><td></td><td># img./mask |Train set w/o mask</td></tr><tr><td>LISA [27]</td><td>ADE20K [62], COCO-Stuff [7], PACO-|150K/1.2M LVIS [41], PartImageNet [18], PASCAL-   )</td><td></td><td>LLaVA-Instruct- 150k [33]</td></tr><tr><td>PerceptionGPT|refCOCO [39] </td><td>refCOCO+ [57],refCOCOg[36] [57], refCOCO+ [57], re-150K/3M fCoCOg [36], VisualGenome [26], Flicker30k [55]</td><td></td><td>MSCOCO-Caption [30], LLaVA-Instruct- 150K [33]</td></tr><tr><td>GSVA [50]</td><td>ADE20K [62], COCO-Stuff [7], PACO-|150K/1.2M LVIS [41], PartImageNet [18], PASCAL- Part [13], refCLEF [44], refCOCO [57], COCO[32]</td><td></td><td>LLaVA-Instruct- 150k [33]</td></tr><tr><td>GLaMM [42]</td><td>GranD [42] (Automatically annotated for|11M/810M SA-1B), GranD-f [42] (Based on Flickr- 30K [55], RefCOCOg [36], and PSG [52])</td><td></td><td></td></tr><tr><td>SAM4MLLM (Ours)</td><td>refCOCO [57], refCOCO+ [57], ref|100K/82K COCOg [36], gRefCOCO [32]</td><td></td><td>VQAv2[4]</td></tr><tr><td>SAM4MLLM* (Ours)</td><td>refCOCO [57]， refCOCO+ [57]， ref-145K/260K COCOg [36],gRefCOCO [32], ADE20K [62], PACO-LVIS [41], PartImageNet [18]</td><td></td><td>VQAv2 [4]</td></tr></table></body></html>\n\n# 4.3 Main Results\nWe compare the two variants of our SAM4MLLM, PPG and PQPP, with previous arts on various tasks. There have been numerous LLM-based methods emerging recently, but our comparisons primarily focus on their results using models of similar scales (7B).  \n\nTable 2: Comparison of methods on refCOCO, refCOCO+, and refCOCOg datasets.   
\n\n\n<html><body><table><tr><td rowspan="2">Method</td><td colspan="3">refCOCO</td><td colspan="3">refCOCO+</td><td colspan="2">refCOcOg</td></tr><tr><td></td><td>valtestA testB</td><td></td><td></td><td></td><td></td><td>val testA testB val(U) test(U)</td><td></td></tr><tr><td>LLM based (13B)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>PerceptionGPT-13B [CVPR-24][39]75.3</td><td></td><td></td><td>79.1</td><td>72.1</td><td>68.9</td><td>74.0</td><td>61.9 70.7</td><td>71.9</td></tr><tr><td>GSVA-Llama2-13B[CVPR-24][50]</td><td></td><td>79.2</td><td>81.7</td><td>77.1</td><td>70.3</td><td>73.8</td><td>63.6 75.7</td><td>77.0</td></tr><tr><td>traditional methods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MAttNet (CVPR-18) [56]</td><td></td><td></td><td>56.51 62.37 51.70 46.67 52.39 40.08</td><td></td><td></td><td></td><td>847.64</td><td>48.61</td></tr><tr><td>STEP [ICCV-19] [9]</td><td></td><td></td><td>60.04 63.46 57.97 48.19 52.33 40.41</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LSCM [ECCV-20][22]</td><td></td><td></td><td></td><td></td><td></td><td>61.37 64.99 59.55 49.34 53.12 43.50</td><td>-</td><td></td></tr><tr><td>VLT [ICCV-21] [16]</td><td></td><td></td><td></td><td></td><td></td><td>65.65 68.29 62.73 55.50 59.20 49.36</td><td>52.99</td><td>56.65</td></tr><tr><td>SeqTR [ECCV-22] [63]</td><td></td><td>67.26 69.79 64.12 54.14 58.93</td><td></td><td></td><td></td><td>48.19</td><td>55.67</td><td>55.64</td></tr><tr><td>CRIS [CVPR-22][48]</td><td>70.5</td><td></td><td>73.2</td><td>66.1</td><td>65.3</td><td>68.1 53.7</td><td>59.9</td><td>60.4</td></tr><tr><td>LAVT [CVPR-22][53]</td><td>72.7</td><td></td><td>75.8</td><td>68.8</td><td>62.1</td><td>68.4 55.1</td><td>61.2</td><td>62.1</td></tr><tr><td>ReLA [CVPR-23][32]</td><td>73.8</td><td>76.5</td><td></td><td>70.2</td><td>66.0</td><td>71.0 57.7</td><td>65.0</td><td>66.0</td></tr><tr><td>X-Decoder 
[CVPR-23][65]</td><td></td><td></td><td>一</td><td></td><td></td><td></td><td>64.6</td><td></td></tr><tr><td>PolyFormer-L [CVPR-23] [34]</td><td></td><td>76.94 78.49 74.83 72.15 75.71 66.73</td><td></td><td></td><td></td><td></td><td>71.15</td><td>71.17</td></tr><tr><td>VPD [ICCV-2023][61]</td><td>73.25</td><td></td><td>-</td><td></td><td>62.69</td><td></td><td>61.96</td><td></td></tr><tr><td>ETRIS [ICCV-2023][51]</td><td></td><td>71.06 74.11 66.66 62.23 68.51 52.79</td><td></td><td></td><td></td><td></td><td>60.28</td><td>60.42</td></tr><tr><td>SEEM [NeurIPS-23][66]</td><td></td><td></td><td></td><td></td><td></td><td>-</td><td>65.7</td><td></td></tr><tr><td>LLM based (7B)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LISA-7B [CVPR-24][27]</td><td>74.9</td><td>79.1</td><td></td><td>72.3</td><td>65.1</td><td>70.8 58.1</td><td>67.9</td><td>70.6</td></tr><tr><td>PixelLM-7B [CVPR-24] [43]</td><td>73.0</td><td>76.5</td><td>68.2</td><td>66.3</td><td>71.7</td><td>58.3</td><td>69.3</td><td>70.5</td></tr><tr><td>PerceptionGPT-7B [CVPR-24][39]</td><td>75.1</td><td>78.6</td><td>71.7</td><td>68.5</td><td>73.9</td><td>61.3</td><td>70.3</td><td>71.7</td></tr><tr><td>GSVA-7B [CVPR-24] [50]</td><td>77.2</td><td>78.9</td><td>73.5</td><td>65.9</td><td>69.6</td><td>59.8</td><td>72.7</td><td>73.3</td></tr><tr><td>GLaMM-7B[CVPR-24][42]</td><td>79.5</td><td>83.2</td><td>76.9</td><td>72.6</td><td>78.7</td><td>64.6</td><td>74.2</td><td>74.9</td></tr><tr><td>SAM4MLLM-7B-PPG</td><td>76.2</td><td>80.1</td><td>72.0</td><td></td><td>71.2 75.9</td><td>64.3</td><td>74.2</td><td>74.3</td></tr><tr><td>SAM4MLLM-7B-PQPP</td><td>77.1</td><td>80.9</td><td>72.5</td><td></td><td>71.5</td><td>76.8 64.7</td><td>74.5</td><td>75.2</td></tr><tr><td>SAM4MLLM-7B-PQPP-LLaVA1.6</td><td>79.6</td><td>82.8</td><td>76.1</td><td></td><td>73.5 
77.8</td><td>65.8</td><td>74.5</td><td>75.6</td></tr><tr><td>SAM4MLLM-8B-PQPP-LLaVA1.6</td><td>79.8</td><td>82.7</td><td>74.7</td><td>74.6</td><td>80.0</td><td>67.2</td><td>75.5</td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>76.4</td></tr></table></body></html>  \n\nNotably, our model is trained with nearly 100 times fewer images and 10000 times fewer masks compared to GLaMM, yet it still achieves comparable quality. The results show that by simply leveraging the connection between off-the-shelf MLLMs and SAM, our model can align MLLMs (already trained with large amounts of multimodal data) to the RES task using considerably less data. Since our model training is consistent with that of the original LLM, our method even performs better than all other methods in the case of longer and more complex sentence understanding (RefCOCOg).  \n\nAS for our PPG and PQPP approaches, SAM4MLLM-PQPP outperforms SAM4MLLM-PPG in all cases, as shown in Tab. 2. The results reveal that there is a trade-off between cost and quality when using PPG and PQPP. PPG predicts SAM prompts in once and needs only a single-turn conversation with MLLM. PQPP requires a two-turn conversation for bbox prediction and points classification but achieves better accuracy in most cases. We further use LLaVA1.6 as our MLLM backbone architecture instead of Qwen-VL for PQPP on these datasets. With a more powerful MLLM, the performance can be enhanced.  \n\n  \nFig. 3: Qualitative examples of SAM4MLLM on RES (top) and GRES (bottom) tasks. See Sec. 4.3 for detailed description.  \n\nGRES: We present the comparison on the gRefCOCO dataset [32] in Tab. 3. Unlike the RES dataset, this dataset contains the cases where multiple instances or no instances are referred. In this generalized RES task, our method sets the new state-of-the-art among the 7B models on most of the splits and metrics, except for “Test Set A”, where we lag slightly behind the recent GSVA [50].  
\n\nReasonSeg: Our method also demonstrates superior results on the complex reasoning segmentation task, as shown in Tab. 4. It is worth noting that we evaluate on this dataset in a zero-shot manner, meaning our model was not trained on relevant tasks before. Besides, we use more training data to train SAM4MLLM, denoted as SAM4MLLM $^*$ . Despite using less training data than LISA, it can outperform LISA-13B-LLaVA1.5.  \n\nVQA: This dataset is not desifned for RES but for visual question answering. We use it to verify that our model, although enhanced by image segmentation functionality, still maintains its original capabilities. The VQA scores in Tab. 5 demonstrate that our approach does not compromise the VQA abilities acquired during the pre-training phase of our MLLM backbone. In fact, the VQA performance is even boosted, perhaps due to our fine-tuning on more datasets.  \n\nPQPP and PPG: Our PQPP consistently outperforms PPG on most results.   \nWe discuss the effect of points prompting strategy further in the ablation studies.  \n\nQualitative results: Fig. 3 presents qualitative examples of our SAM4MLLM approach on various referring expression segmentation datasets. We showcase our results on RES task in the upper row. The leftest image is from refCOCO, showing the successful segmentation of a specific zebra referred to as “behind another one.” The middle-left image, sourced from refCOCO+, demonstrates the accurate identification of the “middle animal” among multiple instances. The middle-right image from refCOCOg illustrates the model’s ability to handle more complex referring expressions, such as “The teddy bear that is as large as the baby.” Finally, the rightest image, also from refCOCO+, showcases the model’s understanding of relative positions, correctly segmenting the “dish closest” to the referred object. 
The bottom row demonstrates SAM4MLLM ability on generalized RES task, where our method accurately segments multiple instances as per the given text. These examples highlight SAM4MLLM’s capability to accurately segment objects based on diverse referring expressions across different datasets.  \n\nTable 3: Results on gRefCOCO(GRES).   \n\n\n<html><body><table><tr><td rowspan="2">Method</td><td colspan="3">ValidationSet</td><td colspan="3">Test Set A</td><td colspan="3">Test Set B</td></tr><tr><td>gloU</td><td>cIoU N-acc.</td><td></td><td>gIoU</td><td></td><td>cIoU N-acc.</td><td>gloU</td><td></td><td>cIoU N-acc.</td></tr><tr><td>LLM-based(13B）model</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LISA-13B [CVPR-24][27]</td><td></td><td>65.24 63.96</td><td>57.49</td><td>69.99</td><td>71.00</td><td>55.43</td><td>62.11</td><td>62.29</td><td>56.34</td></tr><tr><td>GSVA-13B [CVPR-24][50]</td><td></td><td>70.04 66.38</td><td>66.02</td><td>73.29</td><td>72.79</td><td>64.72</td><td>65.45</td><td>63.20</td><td>62.47</td></tr><tr><td>traditionalmethods</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MAttNet[CVPR-18][56]</td><td>48.24</td><td>47.51</td><td>41.15</td><td>59.30</td><td>58.66</td><td>44.04</td><td>46.14</td><td>45.33</td><td>41.32</td></tr><tr><td>LTS [CVPR-21] [24]</td><td>52.70</td><td>52.30</td><td>-</td><td>62.64</td><td>61.87</td><td>-</td><td>50.42</td><td>49.96</td><td></td></tr><tr><td>VLT [ICCV-21][16]</td><td>52.00</td><td>52.51</td><td>47.17</td><td>63.20</td><td>62.19</td><td>48.74</td><td>50.88</td><td>50.52</td><td>47.82</td></tr><tr><td>CRIS 
[CVPR-22][48]</td><td>56.27</td><td>55.34</td><td>-</td><td>63.42</td><td>63.82</td><td>-</td><td>51.79</td><td>51.04</td><td></td></tr><tr><td>LAVT[CVPR-22][53]</td><td>58.40</td><td>57.64</td><td>49.32</td><td>65.90</td><td>65.32</td><td>49.25</td><td>55.83</td><td>55.04</td><td>48.46</td></tr><tr><td>ReLA[CVPR-23][32]</td><td>63.60</td><td>62.42</td><td>56.37</td><td>70.03</td><td>69.26</td><td>59.02</td><td>61.02</td><td>59.88</td><td>58.40</td></tr><tr><td>LLM based (7B)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LISA-7B [CVPR-24][27]</td><td>61.63</td><td>61.76</td><td>54.67</td><td>66.27</td><td>68.50</td><td>50.01</td><td>58.84</td><td>60.63</td><td>51.91</td></tr><tr><td>GSVA-7B[CVPR-24][50]</td><td>66.47</td><td>63.29</td><td>62.43</td><td>71.08</td><td>69.93</td><td>65.31</td><td>62.23</td><td>60.47</td><td>60.56</td></tr><tr><td>SAM4MLLM-7B-PPG</td><td>68.37</td><td>65.66</td><td>63.71</td><td>69.05</td><td>69.62</td><td>65.96</td><td>63.71</td><td>62.35</td><td>61.25</td></tr><tr><td>SAM4MLLM-7B-PQPP</td><td>68.96</td><td>66.33</td><td>62.96</td><td>70.54</td><td>70.13</td><td>65.82</td><td>63.98</td><td>63.21</td><td>61.61</td></tr><tr><td>SAM4MLLM-8B-PQPP-LLaVA1.671.8667.8366.08</td><td></td><td></td><td></td><td></td><td>74.15 72.22</td><td>63.92</td><td></td><td>65.29 63.42</td><td>59.99</td></tr></table></body></html>',
                         'paper_title': 'SAM4MLLM: Enhance Multi-Modal Large Language Model for Referring Expression Segmentation',
                         'source_name': 'ECCV', 'year': '2024', 'chunk_id': 4,
                         'chunk_ext_id': 454846547780780630},
                     {
                         'chunk_content': '# DIntroduction to Our Experiment Method\n\n# D.1 Zero/Few-shot Evaluation Setting\nThe zero-shot setting is where the model is given explicit instructions to directly complete the mission. This scenario evaluates the ability of the original model to solve the problem autonomously without training. In our benchmark, the input to the zero-shot is the new knowledge of the artificial entity and a question to be asked about it.  \n\nCompared to the zero-shot setting, in the fewshot setting the model is given several additional examples from the same task as a reference. This allows evaluating the ability of the model to learn the task quickly based on a limited number of samples, and is also consistent with practical situations where supervised training is not convenient. According to the Min et al. (2022 )’s study, in our benchmark assessment, we provided 3 to 5 examples for each type of problem, expecting that it would be sufficient to be able to demonstrate the labeling space for this type of problem.\n\n# D.2 Chain-of-Thought (CoT) Form\nFor both the zero-shot and few-shot evaluation settings, we add the design of the CoT form. For the zero-shot setting, we added the words "Let’s think step by step." at the end of the question, expecting the model to output the thinking process, which can help LLM to reason about complex problems. For the few-shot setting, we add the thought process in the answer to each sample question shown to inspire the model.  
\n\n<html><body><table><tr><td>Llama2-Chat-7B</td><td></td><td>Alpaca-7B</td><td>Vicuna-7B</td><td>ChatGLM-6B</td><td>Llama2-Chat-7B</td><td>Alpaca-7B</td><td>Vicuna-7B</td><td>ChatGLM-6B</td></tr><tr><td>Zero-Shot-Vanilla</td><td colspan="4"></td><td colspan="4">Zero-Shot-CoT</td></tr><tr><td>KU</td><td>28.92</td><td>31.02</td><td>28.00</td><td>34.64</td><td>47.75</td><td>39.81</td><td>32.74</td><td>24.85</td></tr><tr><td>KD</td><td>32.61</td><td>15.35</td><td>34.70</td><td>32.29</td><td>37.48</td><td>14.29</td><td>33.61</td><td>22.84</td></tr><tr><td>KA</td><td>18.71</td><td>24.60</td><td>23.40</td><td>10.29</td><td>24.61</td><td>19.66</td><td>17.56</td><td>4.97</td></tr><tr><td>Avg.</td><td>29.57</td><td>25.12</td><td>30.88</td><td>31.26</td><td>39.50</td><td>29.44</td><td>31.67</td><td>22.04</td></tr><tr><td></td><td colspan="4">Few-Shot-Vanilla</td><td colspan="4">Few-Shot-CoT</td></tr><tr><td>KU</td><td>37.06</td><td>33.80</td><td>32.78</td><td>47.97</td><td>65.20</td><td>40.77</td><td>38.50</td><td>40.91</td></tr><tr><td>KD</td><td>44.38</td><td>38.97</td><td>39.39</td><td>41.42</td><td>44.71</td><td>36.24</td><td>38.71</td><td>37.19</td></tr><tr><td>KA</td><td>24.42</td><td>27.63</td><td>24.39</td><td>27.47</td><td>28.72</td><td>25.73</td><td>27.11</td><td>26.93</td></tr><tr><td>Avg.</td><td>39.27</td><td>35.09</td><td>35.14</td><td>42.56</td><td>50.15</td><td>38.02</td><td>37.21</td><td>37.64</td></tr></table></body></html>  \n\nTable 10: Number of different forms of KU, KD and KA questions after filtering.   
\n\n\n<html><body><table><tr><td></td><td>multiple choice</td><td>Boolean</td><td>fill-in-the-blank</td></tr><tr><td>KU</td><td>2025</td><td>3138</td><td>3814</td></tr><tr><td>KD</td><td>2486</td><td>24592</td><td>12601</td></tr><tr><td>KA</td><td>8757</td><td>0</td><td>0</td></tr><tr><td>Total</td><td>13268</td><td>27730</td><td>16415</td></tr></table></body></html>\n\n# EDetails of Our Approach to Filtering Questions\nSpecifically, we retain only those artificial entities whose parent entities could be perfectly recalled by the model. In addition, since answering multi-hop questions requires the model to make use of each single-hop knowledge, we then filter out any reasoning chains that contain knowledge that cannot be correctly recalled by the model.  \n\nThe method used for the above two filtering is to construct question templates for the knowledge involved, including attributes and relationships, based on previous work ( Petroni et al. ,2019 ), and then to query the model using the few-shot setting. We filter samples in our benchmark for every evaluated model to ensure that our questions are specific to the ability about new knowledge, and then select the intersection of filtered questions for fair experimentation and analysis. The number of questions per category left is shown in Table 10 .\n\n# FAnalysis about Models’ Output\nAn example of the output of the model is shown in Table 11 . To better analyze the models’ responses, as shown in Table 7 , we divide all the models’ error outputs into 3 categories, including rejecting responses, answering multiple options, and other incorrect responses.  \n\nWe can find that the percentage of answering multiple options for all models is very small, which indicates that all models can understand and comply with our requirements very well. 
In addition, some of the questions are rejected by some models, probably because some models recognizes that it cannot answer the corresponding question and responds with " I don’t know " or " I am sorry ".\n\n# GImpact of Different Modifications\nAs shown in Section 5.2 , models commonly struggle with knowledge differentiation when the artificial entity and the parent entity are similar. In this section, we further conduct an ablation study to investigate the specific impact of different modifications (i.e. variation and dropout).  \n\nTo ablate one type of modification, we reconstruct artificial entities in the KD dataset. For each question in the original KD dataset, we have a parent entity $e^{p}$ and the corresponding attribute $a$ to be queried. We then reconstruct several artificial entities by modifying one attribute of $e^{p}$ (except $a$ ) at a time. From this, we obtain several artificial entities with different similarity caused by the same type of modification. For a fair comparison, we only experiment with $e^{p}$ with 10 attributes. We randomly sample 1000 parent entities and create a total of $1000\\times10=10000$ new artificial entities. Finally, we conduct experiments by querying models about them with the same question about $a$ .  \n\nThe experiment results are presented in Table 7 . Different models exhibit almost the same trend under both modifications. It is clear that dropout yields a stable improvement as the similarity decreases while the impact of variation is relatively weak and insignificant.  \n\n  \nTable 11: Example of question and answer from our models.   \nFigure 7: Relationship between model performance on KD questions and the property similarity between the artificial entity and its parental entity due to different levels of modification, i.e. variation and dropout.\n\n# HPrompts in Experiments\nIn this section, we will show prompts used in our settings. 
Since our questions contain Boolean, multiple choice, and fill-in-the-blank forms, the rest of the prompt is identical except for the restrictions on the output format for each type of question that are targeted. Therefore, without loss of generality, we provide the prompts for the fill-in-the-blank questions as a demonstration. We present the CoT zero-shot prompt as an example in Table 12 . The other methods of prompt are basically similar.',
                         'paper_title': 'ALCUNA: Large Language Models Meet New Knowledge',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 7,
                         'chunk_ext_id': 454845690085922356},
                     {
                         'chunk_content': '# 2. Related Work\n\n# 2.1. Few-Shot and Zero-Shot Learning with PLMs\nInstead of using a large amount of annotated training data for fine-tuning PLMs on downstream tasks, few-shot learning studies how to better leverage only a small amount of task-specific training data, a more realistic scenario in many applications. The most strict few-shot learning setting does not assume access to any unlabeled data or large validation sets for hyperparameter tuning ( Perez et al. ,2021 ), where prompt-based methods ( Brown et al. ,2020 ;Gao et al. ,2021a ;Liu et al. ,2021b ;Logan IV et al. ,2021 ;Scao & Rush ,2021 ;Schick & Sch utze ,2021a ;b;c;Tam et al. ,2021 ;Zhang et al. ,2022 ) are prominently deployed to inject task descriptions into PLMs and make effective use of their language modeling capability for improved training data efficiency in low-data regimes. More broadly, semi-supervised learning additionally leverages unlabeled task-specific data, where data augmentation ( Chen et al. ,2020 ;Xie et al. ,2020 ), regularization ( Miyato et al. ,2017 ) and bootstrapping ( Schick & Sch¨utze ,2021a ) methods are commonly used.  \n\nZero-shot learning, on the other hand, is a much more challenging setting with absolutely no access to any task-specific data. When prompt-based methods are directly used to obtain predictions from PLMs without any training, their zeroshot performance can be much worse ( Brown et al. ,2020 ;Gao et al. ,2021a )—difficult NLU tasks can be barely formulated as prompts that resemble the format of pretraining data, posing great challenges for PLMs to accurately interpret and leverage the prompts without given any training samples. The current mainstream of zero-shot learning is based on transfer learning: By converting a set of tasks with abundant annotations into instruction templates ( Mishra et al. ,2021 ;Sanh et al. ,2021 ;Wei et al. ,2022 ;Xu et al. ,2022 ), entailment pairs ( Yin et al. 
,2019 ;2020 ) or question-answer formats ( Puri & Catanzaro ,2019 ;Zhong et al. ,2021 ) and fine-tuning PLMs on them, the PLMs acquire the cross-task transfer ability ( Ye et al. ,2021 ) to execute unseen tasks when they are formulated in a similar format. Our work proposes a different approach from these studies: We use a unidirectional PLM to generate training data for fine-tuning another PLM on the target task. This not only removes the need for a large amount of cross-task annotations, but also eliminates the task difference in training and inference.\n\n# 2.2. Controlled Text Generation with PLMs\nControlled text generation ( Hu et al. ,2017 ) aims to steer the generated texts of language models towards desired contents, styles or domains. Through fine-tuning PLMs on attributespecific data, high-level control ( e.g. , generating certain topics or sentiments ( Ziegler et al. ,2019 )), fine-grained control ( e.g. , generating specific words or phrases ( Chan et al. ,2021 )) or both ( Khalifa et al. ,2021 ) can be achieved. Adapting PLMs to generate texts of specific attributes can also be realized at inference time without any further training of the PLMs ( Dathathri et al. ,2020 ;Krause et al. ,2021 ;Kumar et al. ,2021 ;Liu et al. ,2021a ;Pascual et al. ,2021 ;Yang & Klein ,2021 ). Different text attributes can also be represented during pretraining time as control codes ( Keskar et al. ,2019 ) which later can serve as explicit guidance for generating domain/attribute-specific texts.  \n\nAlong another line of controlling text generation, the idea of using prompts as guidance has emerged recently—Since natural language generation is largely based on contexts, providing certain prompts as the beginning of a sequence can effectively steer the subsequent texts to be generated. The prompts can be either in natural language ( Schick & Sch utze ,2021b ) or as learnable parameters ( Li & Liang ,2021 ). 
In this work, we also guide text generation via prompts, but for the novel purpose of creating training data for NLU tasks. The idea of using generated texts as training data has been explored in topic classification with bag-of-words or LSTMbased language models ( Meng et al. ,2018 ;2019 ) which do not have enough capacity to generate quality training data for challenging NLU tasks. Compared to annotated taskspecific data, the generated texts may contain noise and have domain difference from the downstream task. We will introduce several important strategies for effective fine-tuning of PLMs on generated data.\n\n# 3. Method\n\n# 3.1. Preliminaries\nProblem Formulation. We consider solving a classification proble are only given the label s $\\boldsymbol{\\wp}$ and a mapping a label-descriptive prompt ( M$\\mathcal{M}:\\mathcal{V}\\rightarrow\\mathcal{W}$ Y →W that converts each label i.e. , a short p e) $y\\in\\mathcal{V}$ $\\pmb{w}_{y}\\in\\mathcal{W}$ ∈Y into .We assume access to a unidirectional PLM $G_{\\theta}$ as the generator and a bidirectional PLM $C_{\\phi}$ which will be fine-tuned as the classifier 3 . We also assume the pretraining corpus $\\mathcal{D}$ (e.g. , Wikipedia) is available.  \n\nText Generation with Unidirectional PLMs. A unidirectional PLM $G_{\\theta}$ is pretrained to maximize the generation probability of each token in a sequence $\\textbf{\\em x}=$ $[x_{1},x_{2},\\ldots,x_{n}]$ conditioned on previous tokens:  \n\n$$\n\\operatorname*{max}_{\\theta}\\prod_{i=1}^{n}p_{\\theta}(x_{i}|\\pmb{x}_{<i}),\n$$  \n\nTable 1: Prompts used to generate class-conditioned texts for different GLUE tasks. SST-2 is a single-sequence classification task and the rest are sequence-pair classification tasks. Generation for CoLA does not use prompts but by varying sampling temperatures. $x^{s}$ denotes a sequence randomly sampled from the pretraining corpus; $\\pmb{x}^{g}$ denotes the sequence to be generated by $G_{\\theta}$ ;. . . 
denotes skipping at least one sequence. See Appendix Bfor more details.  \n\n<html><body><table><tr><td>Task</td><td>Label</td><td>Prompt</td></tr><tr><td>SST-2</td><td>positive negative</td><td>Rating: 5.0 9 Rating: 1.0 9</td></tr><tr><td>MNLI</td><td>entailment neutral</td><td>*. In other words, 9 ". Furthermore, 9 There is a rumor that *</td></tr><tr><td>QNLI</td><td>contradiction entailment</td><td>However, the truth is: 9 c? ∞9 "?...c9</td></tr><tr><td>RTE</td><td>not entailment entailment</td><td>?. In other words, 9</td></tr><tr><td>MRPC</td><td>not entailment equivalent not equivalent</td><td>?. Furthermore, 9 ?. In other words, 9 ".Furthermore,9</td></tr><tr><td>QQP</td><td>equivalent not equivalent</td><td>*? In other words, 9 *?Furthermore, 9</td></tr></table></body></html>  \n\nwhere $p_{\\theta}(\\cdot)$ is usually parameterized ing token embeddings eand contextualized embeddings hgiven by a Transformer ( Vaswani et al. ,2017 ) encoder:  \n\n$$\np_{\\theta}(x_{i}|\\pmb{x}_{<i})=\\frac{\\exp(e_{i}^{\\top}\\pmb{h}_{i})}{\\sum_{j=1}^{|V|}\\exp(e_{j}^{\\top}\\pmb{h}_{i})}.\n$$  \n\nAfter pretraining, $G_{\\theta}$ can be directly used to generate new texts by recursively sampling tokens from its output probability distribution. Typically, a temperature hyperparameter $\\tau>0$ is introduced during sampling ( Hinton et al. ,2015 )to adjust the sharpness of the probability distribution:  \n\n$$\np_{\\theta}(x_{i}|\\pmb{x}_{<i})=\\frac{\\exp(e_{i}^{\\top}{h_{i}}/\\tau)}{\\sum_{j=1}^{|V|}\\exp(e_{j}^{\\top}{h_{i}}/\\tau)},\n$$  \n\nwhere $\\tau\\rightarrow0$ app s greedily picking the most probable next token; τ→∞ induces a uniform distributio .Additionally, sampled tokens can be confined to the top$k$ most probable ones to avoid low-quality tokens. In this work, we find such top$k$ sampling with temperature is sufficient to produce coherent and meaningful texts as training data for NLU tasks. Exploring more sophisticated sampling strategies ( Holtzman et al. 
,2020 ) is left for future work.',
                         'paper_title': 'Generating Training Data with Language Models: Towards Zero-Shot Language Understanding',
                         'source_name': 'NeurIPS', 'year': '2022', 'chunk_id': 1,
                         'chunk_ext_id': 454959868130625860},
                     {
                         'chunk_content': '# 2 Related Work\n\n# 2.1 Prompt-based Zero/Few-shot Learning\nThe emergent ability of LLMs has sparked heightened interest in prompt-based zero-shot and fewshot learning ( Ye et al. ,2021 ;Schick and Schütze ,2021 ). Instead of fine-tuning on massive downstream data, in-context learning (ICL) ( Brown et al. ,2020 ), which suits LLMs to new tasks with fewshot input-label exemplars as demonstrations without training, has shown promising few-shot performance. It has been further improved by later works (Liu et al. ,2022 ;Lu et al. ,2022 ;SU et al. ,2023 ).  \n\nOn the other hand, zero-shot learning is much more challenging without task-specific data. Direct steering LLMs for predictions without in-context demonstrations can lead to significantly degraded performance ( Gao et al. ,2021 ). To bridge this, some methods ( Wei et al. ,2022 ;Sanh et al. ,2022 ;Xu et al. ,2022 ) adopt instruction tuning with a multi-task paradigm to further pre-train the LLMs with a collection of different tasks in shared prompting templates. However, these methods require cumbersome training for LLMs and the overwhelming bulk of cross-task human annotations. Another new line of research ( Ye et al. ,2022a ;Meng et al. ,2022 ;Ye et al. ,2022b ) endeavors to ameliorate zero-shot learning merely via dataset generation, while the synthesized data commonly involves a notable portion of low-quality samples and misses the nuanced semantics present in the original data. In our work, we take inspiration from active learning with an innovative viewpoint to distill and filter the rich knowledge from LLMs for boosted zero-shot generalization performance.\n\n# 2.2 Active Learning\nActive learning (AL) is a prevailing paradigm in various NLP tasks ( Yuan et al. ,2020 ;Zhao et al. ,2020 ;Shelmanov et al. ,2021 ;Wang et al. ,2022 )that aims to reduce labeling effort by selecting only the most useful examples to annotate. 
In each iteration of active learning, a model is trained on the currently labeled data and then tasked with selecting the most informative yet-to-be-labeled data point to be labeled for boosted performance. Based on different querying strategies ( Settles and Craven ,2008 ), existing traditional active learning methods can be categorized into uncertainty-based methods ( Prabhu et al. ,2019 ;Margatina et al. ,2021 )and diversity-based methods ( Sener and Savarese ,2018 ;Ru et al. ,2020 ;Ash et al. ,2020 ). While these methods relieve the annotation burden to some extent, they still count on human experts as expensive supervision sources to start with. To overcome this high cost, we investigate the opportunities of leveraging the rich knowledge of LLMs as a lowcost supervision source for boosting generalization performance without human effort.  \n\n  \nFigure 2: Overview of FreeAL. In each collaborative training loop, the LLM serves as an active annotator imbuing its knowledge. Then the SLM is employed as a filter to distill the task-related knowledge with robust self-training from LLM and filter out a high-quality demonstration pool $\\mathcal{D}_{\\mathrm{demo}}$ to feedback the subsequent label refinery of LLM.\n\n# 3 Background\nWe consider unsupervised classification tasks without human annotations . Given an unlabeled trainset $D_{\\mathrm{train}}=\\{x_{i}\\}_{i=1}^{n}$ with $n$ samples, where ground-truth label ∈X is the input text and the corresponding $y\\,\\in\\,\\mathcal{V}\\,=\\,\\{1,\\dots,C\\}$ is inaccessible. Our task is to predict the true label for both the training dataset $\\mathcal{D}_{\\mathrm{train}}$ and test dataset $\\mathcal{D}_{\\mathrm{test}}$ .Our framework employs a pre-trained large language model (LLM) language model (SLM) a natural language template $\\mathcal{P}$ Sand a downstream small . 
For the LLM, we define $T(\\cdot)$ which contains additional task-related information and a verbalizer $V(\\cdot)$ to a pre-defined token in the prompt. For the finewhich maps each class label in $\\{1,\\ldots,C\\}$ tuning of SLM $\\mathcal{S}$ with parameters $\\theta$ , we adopt the cross entropy loss for training, where $\\begin{array}{r}{l_{i}\\,=\\,-\\sum_{j\\in Y}\\tilde{y_{i}}^{j}\\log S^{j}(x_{i},\\theta)}\\end{array}$ $S^{j}(x_{i},\\theta)$ is the ∈$j$ -th entry of SLM’s output softmax probability for the input $x_{i}$ with the pseudo label $\\tilde{y_{i}}^{j}$ .  \n\nFew-shot In-context Learning. When supervised data are available, we can directly employ the few-shot ICL for inference. In concrete, given a demonstration supporting pool $\\mathcal{D}_{\\mathrm{demo}}~=~\\{x_{i}^{\\mathrm{demo}},\\tilde{y}_{i}^{\\mathrm{demo}}\\}_{i=1}^{m}$ for prompt retrieval during ICL, we construct a prompt including a test input $x^{\\mathrm{{test}}}$ and $m$ -shot in-context examples $\\{(x_{j}^{\\mathrm{demo}},\\Tilde{y}_{j}^{\\mathrm{demo}})\\}_{j=1}^{m}$ demonstration. The final prompt steers the LLM retrieved from $\\mathcal{D}_{\\mathrm{demo}}$ as the and the prediction is obtained via,  \n\n$$\n\\begin{array}{r l r}&{}&{\\arg\\operatorname*{max}P_{y\\in Y}(V(y)\\mid T(x_{1}^{\\mathrm{demo}},\\tilde{y}_{1}^{\\mathrm{demo}}),}\\\\ &{}&{\\quad\\quad\\quad\\quad\\ldots,T(x_{m}^{\\mathrm{demo}},\\tilde{y}_{m}^{\\mathrm{demo}}),T(x^{\\mathrm{test}}))}\\end{array}\n$$  \n\nDespite the simplicity, the success of ICL largely hinges on the demonstration pool requires human efforts of careful annotation for $\\mathcal{D}_{\\mathrm{demo}}$ , which every individual scenario and can be particularly annoying for challenging tasks. 
To bridge this gap, we resort to our proposed plug-and-play method FreeAL without involving any human supervision.\n\n# 4 FreeAL\nIn this section, we introduce our proposed framework FreeAL which investigates the opportunity for human-free active learning in the LLMs era. In contrast to traditional active learning that requests human annotation in each training loop, FreeAL employs LLMs as weak annotators. In each training loop, we alternate the following steps:  \n\n1. Active labeling of the to-be-labeled samples via LLMs based on the feedback from SLMs.   \n2. Training weakly supervised SLMs to distill the task-related knowledge from noisy annotations of LLMs and in turn feedback to them.  \n\nThe overview of the FreeAL framework is displayed in Figure 2 and its overall pipeline is also shown in Algorithm 1 . In what follows, we will elaborate on our FreeAL framework minutely.',
                         'paper_title': 'FreeAL: Towards Human-Free Active Learning in the Era of Large Language Models',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845719570571468},
                     {
                         'chunk_content': '# 2.2 Models\nZero-shot and Few-shot Models For zero-shot and few-shot prompting, we explore various multilingual generative LLMs of different pretraining processes and architectures, including BLOOMZ, mT0 ( Muennighoff et al. ,2022 ) and XGLM ( Lin et al. ,2021 ). We explore all model sizes except for BLOOMZ 175B due to resource limitations. We also include ChatGPT into our analysis and specifically GPT${\\cdot3.5_{\\mathrm{turbo}}}$ is used. We explore 0, 1, 3, and 5-shot on each model with 5 diverse prompt templates. Details for each prompt can be seen in Appendix C.  \n\nFor the SA task, we compute the probability of the model to generate each label as the next immediate continual generation, and then we pick the label resulting in the highest probability for the whole sequence. For MT, SUM and LID, we perform standard text generation. However, for LID, we expect the generated text to follow a predefined format where each [token, language tag] pair is represented as [ token | tag ] . We parse the generation using a dynamic programming algorithm introduced in Paolini et al. (2021 ) to extract the valid [token, language tag] pairs for evaluation.  \n\nFine-tuning Models In addition to zero-shot prompting models and few-shot in-context learning, we also experiment with fine-tuning as a benchmark against prompting. For SA and word-level LID tasks, we fine-tune four models, namely, base and large variants of XLM-RoBERTa ( Conneau et al. ,2020 ), mBERT ( Devlin et al. ,2019 ), and mDeBERTa v3 ( He et al. ,2021 ).  \n\nFor MT, we fine-tune eight models in total. These include small ,base , and large variants of mT0 ( Muennighoff et al. ,2022 ); 418M and 1.2B variants of M2M100 ( Fan et al. ,2020 ); and standard, one-to-many, and many-to-many variants of mBART-50 ( Liu et al. ,2020 ;Tang et al. 
,2020 )2  \n\nFor SUM, we follow the same setup used in MT, except we only fine-tune the three previously mentioned mT0 models and only the standard mBART50 as the one-to-many and many-to-many variants are specifically for translation only.  \n\nAcross all the tasks, we fine-tune the selected models on all the available training instances. Table 1 shows a full overview and comparison of the models investigated in this study and details for training setups for all tasks can be found in Appendix A .\n\n# 3 Results and Discussion\nOverall Results Figure 2 presents the results of various multilingual LLMs for the four CSW tasks. In general, we observe a scaling pattern when prompting multilingual LLMs across tasks. Nevertheless, the performance of these models significantly falls short when compared to that of substantially smaller fine-tuned models. Therefore, adopting a fine-tuned model is a more practical approach for dealing with CSW tasks, especially in scenarios with constrained computational resources. 
For ChatGPT, it demonstrates comparable performance to fine-tuned models across all tasks  \n\n<html><body><table><tr><td colspan="4">Sentiment Analysis</td><td colspan="3">Machine Translation</td><td colspan="2">Summarization</td><td colspan="3">Language Identification</td></tr><tr><td rowspan="2">Model</td><td colspan="3"></td><td colspan="3"></td><td rowspan="2">Model</td><td>— RL</td><td rowspan="2"></td><td colspan="2">F1</td></tr><tr><td>Mal-Eng</td><td>F1 Spa-Eng</td><td>Tam-Eng</td><td>Model</td><td>BLEU Hng→Eng</td><td>Eng-→Hng8</td><td>Hng>Eng</td><td>Model</td><td>Hin-Eng MSA-EA</td></tr><tr><td></td><td colspan="3">Finetuning</td><td colspan="3">Finetuning</td><td colspan="2">Finetuning</td><td colspan="3">Finetuning</td></tr><tr><td>XLMR278M</td><td>77.08</td><td>77.14</td><td>68.12</td><td>M2M100418M</td><td>28.53</td><td>12.40</td><td>mTO, 00M*</td><td>29.83</td><td>XLMR278M</td><td>82.44</td><td>72.58</td></tr><tr><td>XLMR560M</td><td>79.94</td><td>78.81</td><td>68.28</td><td>mBART50610M?</td><td>29.53</td><td>13.38</td><td>mTOp. 
580M?</td><td>37.44</td><td>XLMR560M</td><td>86.65</td><td>79.79</td></tr><tr><td>mBERT178M</td><td>78.21</td><td>70.02</td><td>65.19</td><td></td><td>25.47</td><td>12.28</td><td>mTO,128*</td><td>40.12</td><td>mBERT178M</td><td>81.99</td><td>68.02</td></tr><tr><td>mDeBERTa278M</td><td>44.56</td><td>88.17</td><td>45.56</td><td>mTOp,1.2B</td><td>31.88</td><td>13.90</td><td>mBART50610M</td><td>39.03</td><td>mDeBERTa278M</td><td>85.41</td><td>68.02</td></tr><tr><td colspan="3">O-shot Prompting</td><td></td><td colspan="3">0-shot Prompting</td><td colspan="2">O-shot Prompting</td><td colspan="3">5-shot Prompting</td></tr><tr><td>mTO300M</td><td>36.79</td><td>48.44</td><td>42.26</td><td>mT0300M</td><td>2.74</td><td>1.60</td><td>mTO300M</td><td>16.00</td><td>mT0300M</td><td>2.13</td><td>0.90</td></tr><tr><td>mTO580M</td><td>44.60</td><td>56.01</td><td>47.62</td><td>mTO580M</td><td>6.42</td><td>2.37</td><td>mT0580M</td><td>20.16</td><td>mT0580M</td><td>0.30</td><td>0.00</td></tr><tr><td>mT01.2B</td><td>55.62</td><td>67.63</td><td>53.88</td><td>mT01.2B</td><td>10.64</td><td>1.88</td><td>mT01.2B</td><td>23.63</td><td>mTO1.2B</td><td>0.22</td><td>0.27</td></tr><tr><td>mTO3.7B</td><td>35.27</td><td>59.28</td><td>38.55</td><td>mTO3.7B</td><td>12.78</td><td>2.08</td><td>mT03.7B</td><td>27.40</td><td>mTO3.7B</td><td>0.19</td><td>1.49</td></tr><tr><td>mTO13B</td><td>49.97</td><td>65.26</td><td>50.76</td><td>mTO13B</td><td>19.28</td><td>1.66</td><td>mTO13B</td><td>30.67</td><td>mT013B</td><td>7.51</td><td>5.07</td></tr><tr><td>BLOOMZ560M</td><td>59.64</td><td>72.79</td><td>55.30</td><td>BLOOMZ560M</td><td>2.24</td><td>1.37</td><td>BLOOMZ560M</td><td>14.22</td><td>BLOOMZ560M</td><td>5.38</td><td>2.08</td></tr><tr><td>BLOOMZ1.1B</td><td>50.64</td><td>70.89</td><td>53.27</td><td>BLOOMZ1.1B</td><td>2.79</td><td>1.73</td><td>BLOOMZ1.1B</td><td>16.45</td><td>BLOOMZ1.1B</td><td>16.31</td><td>10.56</td></tr><tr><td>BLOOMZ1.7B</td><td>47.83</td><td>73.20</td><td>50.15</td><td>BLOOMZ1.7B</td>
<td>2.62</td><td>2.62</td><td>BLOOMZ1.7B</td><td>16.85</td><td>BLOOMZ1.7B</td><td>13.04</td><td>3.37</td></tr><tr><td>BLOOMZ3B</td><td>56.84</td><td>72.85</td><td>53.41</td><td>BLOOMZ3B</td><td>3.13</td><td>2.86</td><td>BLOOMZ3B</td><td>20.97</td><td>BLOOMZ3B</td><td>19.61</td><td>17.47</td></tr><tr><td>BLOOMZ7B</td><td>64.21</td><td>74.61</td><td>59.43</td><td>BLOOMZ7B</td><td>3.67</td><td>1.88</td><td>BLOOMZ7B</td><td>17.01</td><td>BLOOMZ7B</td><td>19.58</td><td>9.26</td></tr><tr><td>XGLM564M</td><td>52.18</td><td>64.16</td><td>52.66</td><td>XGLM564M</td><td>0.45</td><td>0.28</td><td>XGLM564M</td><td>4.29</td><td>XGLM564M</td><td>6.65</td><td>1.61</td></tr><tr><td>XGLM1.7B</td><td>50.83</td><td>65.01</td><td>50.55</td><td>XGLM1.7B</td><td>0.79</td><td>0.43</td><td>XGLM1.7B</td><td>5.42</td><td>XGLM1.7B</td><td>5.90</td><td>6.27</td></tr><tr><td>XGLM2.9B</td><td>60.15</td><td>64.78</td><td>56.43</td><td>XGLM2.9B</td><td>1.34</td><td>0.69</td><td>XGLM2.9B</td><td>5.75</td><td>XGLM2.9B</td><td>17.64</td><td>10.75</td></tr><tr><td>XGLM4.5B</td><td>62.32</td><td>70.34</td><td>56.94</td><td>XGLM4.5B</td><td>2.13</td><td>0.47</td><td>XGLM4.5B</td><td>4.73</td><td>XGLM4.5B</td><td>19.35</td><td>20.51</td></tr><tr><td>XGLM7.5B</td><td>60.93</td><td>68.52</td><td>56.04</td><td>XGLM7.5B</td><td>1.43</td><td>0.39</td><td>XGLM7.5B</td><td>5.92</td><td>XGLM7.5B</td><td>16.91</td><td>18.91</td></tr><tr><td>GPT-3.5turbo</td><td>65.92</td><td>75.64</td><td>63.15</td><td>GPT-3.5turbo</td><td>27.64</td><td>4.32</td><td>GPT-3.5turbo</td><td>25.07</td><td>GPT-3.5turbo</td><td>80.19</td><td>71.41</td></tr></table></body></html>\n\nαDue to budget limitations, the results presented in $\\mathrm{GPT}{\\mathord{\\mathrm{-}}}3.5_{\\mathrm{turbo}}$ are based on 1-shot prompting instead of 5-shot. β,,Hng refers to Hinglish, a mix of Hindi and English. mBART50 refers to the many-to-many variant. $\\mathrm{mT0_{p}}$ refers to the fine-tuned $\\mathbf{mT0}$ with prompt templates.  
\n\nTable 2: Code-switching benchmark results for finetuned and prompting models. We report the 0-shot performance for the sentiment analysis, machine translation and summarization tasks; and 5-shot performance for the word-level language identification task.  \n\nand datasets, except for the English to Hinglish MT task. This exception may stem from the challenges in generating code-switched texts as outlined in previous research ( Yong et al. ,2023 ;Zhang and Eickhoff ,2023 ). For the remaining tasks, ChatGPT notably outperforms publicly available multilingual LLMs. Such discrepancy may be attributed to the RLHF objective in its pretraining process, although a comprehensive analysis is hindered by its proprietary nature.\n\n# 3.1 Sentiment Analysis Results\nFigure 5 shows a detailed breakdown for each of the three language datasets in the SA task. The results from fine-tuned models mainly reside in the top-left corner across all three datasets, highlighting their superior performance with considerably smaller sizes. Scaling BLOOMZ and XGLM yield small improvements, however, scores from mT0 fluctuate around $50\\;\\mathrm{F1}$ when varying sizes. It’s worth noting that the majority-class baseline of these three datasets has an average F1 score of 46. Considering the instability observed during the scaling-up process, mT0 struggles to understand the sentiment when presented in CSW texts.\n\n# 3.2 Machine Translation Results\nAs shown in Figure 2 and Table 2 , when the source is Hinglish and target English, the performance gap between prompting and fine-tuning in MT is much more apparent, with the best prompted LLM mT0-XXL achieving no more than 20 BLEU while all the fine-tuned models achieved between 25-32 BLEU score. In contrast to SA, we notice especially visible improvement during scaling up encoder-decoder style models such as mT0, while decoder-only models such as BLOOMZ and XGLM have minimal improvements given their overall poor performance.  
\n\nWe then compare the difference in LLM scaling between translation tasks with code-switched sources and monolingual ones 4 . Figure 3 shows the scaling trajectory of LLMs for both Hindi English and Hinglish $\\rightarrow$ English translation direc$\\rightarrow$ tion; Table 3 presents the regression coefficient $(\\beta)$ in these two scenarios. A large coefficient indicates scaling has more noticeable impacts. We can observe that the influence of scaling is more apparent in monolingual sources than in the code-switched setup. This pattern could potentially result from the limited pretraining samples for Hinglish codeswitched data, leading to a sub-optimal scaling performance.  \n\n  \nFigure 2: Evaluation results of fine-tuning and prompting LLMs of different scales on various CSW tasks. (top left) F1-score on the sentiment analysis task, (top right) BLEU score on the machine translation task, (bottom left) ROUGE-L on the summarization task, and (bottom right) F1-score on the word-level language identification task. (FT) means results are from fine-tuned models.  \n\nWhen models are tasked with translating the source into CSW text, a substantial performance decline is observed for both fine-tuned and prompted models. We notice that while the larger mT0 models are capable of producing English translations in a zero-shot manner, they struggle to generate CSW texts as seen in previous work ( Yong et al. ,2023 ). Upon looking at the output, mT0 simply outputs in English, even in few-shot settings in which it has seen some other Hinglish examples.',
                         'paper_title': 'Multilingual Large Language Models Are Not (yet) Code-Switchers',
                         'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                         'chunk_ext_id': 454845652970522638}],
                 content="""In this chapter, we will delve into zero-shot and few-shot learning, which are crucial for enhancing the planning capabilities of large language models (LLMs). As mentioned earlier, LLMs have made significant strides in natural language processing, yet they face challenges in planning due to their lack of internal world models and difficulty in multi-step reasoning and decision-making. Zero-shot and few-shot learning offer promising approaches to address these challenges.

Zero-shot learning aims to solve unseen tasks without labeled training examples, while few-shot learning leverages a small amount of task-specific training data to improve training data efficiency in low-data regimes. These approaches are particularly relevant for LLMs, as they enable them to learn and adapt to new tasks with limited or no explicit training data.

One notable method in zero-shot learning is MICRE, which introduces a new approach where an LLM is meta-trained to learn to in-context learn relations. MICRE has demonstrated competitive results compared to current state-of-the-art task-specific models and outperforms strong baselines, including supervised fine-tuning and in-context learning without metatraining methods. This highlights the potential of zero-shot learning in enhancing LLMs' planning capabilities.

In the realm of few-shot learning, prompt-based methods have shown promising results. These methods leverage in-context learning (ICL) with few-shot input-label exemplars as demonstrations, enabling LLMs to quickly learn tasks based on limited examples. However, zero-shot learning without task-specific data can lead to significantly degraded performance, prompting the need for instruction tuning with a multi-task paradigm to pre-train LLMs.

To further improve zero-shot learning, recent research has focused on dataset generation. Despite the synthesized data often containing low-quality samples and missing nuanced semantics, this approach holds potential for enhancing LLMs' planning capabilities. Additionally, active learning (AL) has emerged as a technique to reduce labeling effort by selecting the most useful examples to annotate. The FreeAL framework, which employs LLMs as weak annotators and SLMs to distill task-related knowledge from noisy annotations, demonstrates the effectiveness of combining active learning with LLMs.

In conclusion, zero-shot and few-shot learning provide valuable techniques for enhancing the planning capabilities of LLMs. Through approaches such as MICRE, prompt-based methods, dataset generation, and active learning, LLMs can learn and adapt to new tasks with limited or no explicit training data. These advancements pave the way for LLMs to tackle complex planning challenges and expand their applicability in various domains. In the next chapter, we will explore another method for improving LLMs' planning capabilities: knowledge distillation.""")
    s10 = Section(section_identifier="3_1_2", title="提示工程和模板设计",
                  description='介绍了提示工程和模板设计的原理和方法，以及它们在LLMs规划能力中的作用。',
                  reference=[
                      {
                          'chunk_content': '# 2. Related Work\n\n# 2.1. Large Multimodal Models\nLarge multimodal models (LMMs) have significantly enhanced the performance of tasks requiring the understanding of diverse modalities. These models generally fall into two categories based on their use of large language models (LLMs).  \n\nThe first category [ 21 ,31 ,36 ] involves models trained from scratch or employing relatively smaller language models like BERT for text processing. They typically utilize a blend of contrastive and generative objectives to address a range of multimodal tasks. However, their limited language understanding capacity often hinders their performance in tasks that demand extensive world knowledge and reasoning abilities.  \n\nThe advent of LLMs has spurred a new direction in LMM development, where LLMs are augmented with multimodal comprehension capabilities [ 26 ,29 ,30 ,38 ]. Common approaches in this framework involve integrating adapters to align visual and textual representations within LLMs. Notable examples include BLIP-2 [ 15 ], Flamingo [ 1 ], MiniGPT-4 [ 41 ], llama-adapter [ 10 ,37 ], LLaVA [ 17 ], InstructBLIP [ 8 ], InternGPT [ 19 ], and QwenVL [ 2 ]. While these methods have shown improved performance in vision-language tasks through instructional tuning, their primary limitation lies in generating only textual outputs about the entire image, thus constraining their utility in tasks requiring more granular, region-level or pixellevel understanding.\n\n# 2.2. Fine-Grained LMMs\nIn many practical applications, understanding visual inputs at a more detailed level is crucial, such as specific regions or even at the pixel level. Several methods have been proposed to endow LLMs with this fine-grained understanding capability. 
GPT4RoI [ 39 ], Shikra [ 5 ], VisionLLM [ 32 ], Kosmos-2 [ 27 ], InternGPT [ 20 ], and Ferret [ 35 ]offer grounding capabilities to specified image regions, typically encoding location coordinates as tokens for integration with LLMs. Unlike these methods, which lack the ability to generate pixel-wise masks, LISA [ 14 ] integrates SAM with LLMs for segmentation tasks. Moreover, LISA explores the use of LMMs for complex instruction reasoning, which differs from traditional tasks that rely on explicit human instructions for object or category identification. However, LISA is constrained to handling single targets in images, and the incorporation of SAM adds significant computational overhead. In contrast, our goal is to develop an efficient LMM capable of pixel-level image reasoning and understanding, accommodating a varied number of targets and diverse reasoning complexities.  \n\n  \nFigure 2. Overview of the proposed PixelLM model architecture. (Left) Overall architecture. (Right) The proposed lightweight pixel decoder. Trainable LoRA parameters are incorporated into the LLM. All parameters except those for the CLIP encoder and LLM are trainable.\n\n# 3. Method\nWe first outline the framework in Sec. 3.1 , elucidating two key designs: pixel decoder and segmentation codebook. Training objectives are introduced in Sec. 3.2 .',
                          'paper_title': 'PixelLM: Pixel Reasoning with Large Multimodal Model',
                          'source_name': 'CVPR', 'year': '2024', 'chunk_id': 1,
                          'chunk_ext_id': 454849206430391516},
                      {
                          'chunk_content': '# 6. Conclusion\nWe demonstrate that large language models (LLMs) can be used as a planner for embodied agents and can dramatically reduce the amount of human annotations needed for learning the instruction following task. Our work opens a new door for developing versatile and extremely sampleefficient embodied agents by harnessing the power of large language models and enhancing them with physical grounding. Promising future directions include exploring other LLMs such as Codex [ 5 ], better prompt design, and more advanced methods for grounding and dynamic re-planning.\n\n# Acknowledgement\nThe authors would like to thank the colleagues from the OSU NLP group for their thoughtful comments. This research was supported by ARL W911NF2220144.\n\n\n\n# Appendices\nIn this supplementary material, we present additional details and clarifications that are omitted in the main text due to space constraints.  \n\n•Appendix A : Comparison with $({\\mathrm{SL}})^{3}$ on ALFRED   \n•Appendix B : Prompt design choices and prompt selection under true few-shot setting (cf. section 3.2 in the main paper)   \n•Appendix C : Additional fine-grained analyses (cf. section 4 in the main paper)\n\n# A. Comparison with $(\\mathbf{SL})^{3}$ on ALFRED\n$(\\mathrm{SL})^{3}$ [31 ] is a recent hierarchical planning model that is also evaluated on the ALFRED benchmark. It randomly samples $10\\%$ of ALFRED’s training data for training. The high-level planner is based on a pre-trained T5-small [ 27 ]model, which is fine-tuned to generate high-level plans from the goal instruction. The low-level planner is another finetuned T5-small model, which is tasked of generating a lowlevel plan for each subgoal in the high-level plan. Both goal and step-by-step instructions are needed for training, but only goal instructions are needed at inference time.  
\n\nWe could not compare $(\\mathrm{SL})^{3}$ under the same few-shot setting as LLM-Planner because its code was not publicly available at the time of submission. However, we would like to highlight that our method achieves comparable performance on the validation set despite using only less than $1/20$ of training data than $\\mathrm{(SL)}^{3}$ ($0.5\\%$ vs. $10\\%$ of ALFRED’s training data).\n\n# B. Prompt Design Choices\nIn-context learning with GPT-3 could be sensitive to the prompt design. In Table 3 , we show different prompt design choices we have experimented for LLM-Planner. We structure our prompt into six consecutive parts: task introduction, goal instruction, step-by-step instruction, plan list, object list, and retrieval message. For each part, we have a default phrase and a list of additional options to try on top of the default phrasing signified as []. All the options listed only modify the phrase that goes in []. First, we try adding punctuation marks around actions and object. Next, we naturalize each action name as a plain English text. Lastly, we experiment with finding the optimal delimiter between action list and step-by-step instruction list. We compared comma, period, and newline inserted between the sentences. The best prompt was chosen from the LOOCV accuracy for high-level plans and is bolded.\n\n# C. Additional Fine-Grained Analyses\n\n# C.1. HLP Accuracy by Task Type\nWe show LLM-Planner’s high-level planning (HLP) accuracy breakdown by task type in Table 4 . Because it is difficulty to determine a single value for the HLP accuracy for dynamic LLM-Planner, here we focus on the static version, but the HLP accuracy of the dynamic version generally correlates well with that of the static version. From the results, we observe that the results do not depend much on the difficulty of the task. 
For example, the task “Stack & Place” is often considered as the most difficult task based on the success rate of state-of-the-art models, but LLM-Planner’s HLP accuracy is similar to those of easier tasks such as “Place two” . We find that LLM-Planner is not overly sensitive to the complexity of tasks. This suggests that it could generalize well to different types of tasks with only a few in-context examples.\n\n# C.2. End-to-End Performance by Task Type\nWe show the end-to-end performance breakdown by task type of dynamic LLM-Planner $+\\mathrm{\\HLSM}$ in Figure 5 . As a reference, we also compare with HLSM and FILM trained with the full training set of ALFRED. Keep in mind that this is not apples-to-apples comparison because LLM-Planner is under the few-shot setting. Despite that, we can see that LLM-Planner $+\\mathrm{\\HLSM}$ achieves comparable performance with HLSM, and the distribution of the two are similar. This is likely due to the shared low-level planner and object detector, which introduce a similar error profile. This again shows that our few-shot high-level planner is as good as HLSM’s high-level planner that is trained with the full training set. On the other hand, it also shows that there is still a large room to improve by using better low-level planners and object detectors. For example, even though our HLP accuracy for “Heat & Place” is $36\\%$ as shown in Table 4 , we could only get $1.8\\%$ success rate due to the object detector from HLSM often failing to detect the “microwave” . If we use FILM’s low-level planner and object detector, we may be able to achieve much better performance on this task.  \n\n<html><body><table><tr><td>Options</td><td>Task Introduction</td><td>Goal Instruction</td><td>Step-by-step Instructions</td><td>Plan List</td><td>Object List</td><td>Retrieval Message</td></tr><tr><td>Default</td><td>Create a high-level plan for completing a household task using the allowed actions and visible objects. 
Allowed actions are [action list]</td><td>Task description: [goal instruction]</td><td>Step-by-step instructions: [instructions]</td><td>(Completed, Next) plan: [subgoals]</td><td>Visible objects are [objects]</td><td>Next plan:</td></tr><tr><td>Punctuation</td><td>("PickupObject") (PickupObject) PickupObject</td><td></td><td></td><td>("PickupObject", "Apple") (PickupObject, Apple) PickupObject, Apple</td><td></td><td></td></tr><tr><td>Naturalization</td><td>PickupObject Pickup Pick up</td><td></td><td></td><td>PickupObject Pickup Pick up</td><td></td><td></td></tr><tr><td>Delimiter</td><td></td><td></td><td>Pick up, go to Pick up. Go to. Pick up \\n Go to</td><td>Pickup, Navigate Pickup. Navigate Pickup\\n Navigate</td><td>Apple, orange Apple. orange Apple \\n Orange</td><td></td></tr></table></body></html>  \n\nTable 3. For each element in our prompt design, we list the default phrasing. For the representation of actions, objects, and lists, we additionally experiment with different choices of punctuation, naturalization, and the delimiter between elements in a list. We select the optimal prompt design using LOOCV on the 100 training examples. The chosen options are highlighted in bold.  \n\nTable 4. Static LLM-Planner’s high-level planning accuracy breakdown by task type.   \n\n\n<html><body><table><tr><td rowspan="2">Task Type</td><td colspan="2">HLP Accuracy</td></tr><tr><td>ValidUnseen</td><td>ValidSeen</td></tr><tr><td>Pick&Place</td><td>51</td><td>46</td></tr><tr><td>Stack&Place</td><td>38</td><td>25</td></tr><tr><td>PlaceTwo</td><td>39</td><td>45</td></tr><tr><td>Examine</td><td>44.4</td><td>49</td></tr><tr><td>Heat&Place</td><td>36</td><td>48</td></tr><tr><td>Cool&Place</td><td>43</td><td>46</td></tr><tr><td>Clean&Place</td><td>48.8</td><td>32</td></tr></table></body></html>  \n\n  \nFigure 5. Success rate by task type on ALFRED valid unseen split.',
                          'paper_title': 'LLM-Planner: Few-Shot Grounded Planning for Embodied Agents with Large Language Models',
                          'source_name': 'ICCV', 'year': '2023', 'chunk_id': 8,
                          'chunk_ext_id': 454848411359935930},
                      {
                          'chunk_content': '# 2 Related Work\nLifelong Learning for PLMs. Lifelong learning aims at incrementally acquiring new knowledge, and in the meantime, mitigating the catastrophic forgetting issue. Numerous efforts have been spent towards this goal, including (1) memory-based methods ( Rebuffiet al. ,2017 ;Rolnick et al. ,2019 ), which perform experience replay with authentic data ( de Masson d’Autume et al. ,2019 ), automatically generated data ( Sun et al. ,2020 ), or previously computed gradients ( Lopez-Paz and Ranzato ,2017 ) conserved in the memory, (2) consolidationbased methods ( Kirkpatrick et al. ,2017 ;Aljundi et al. ,2018 ), which introduce additional regularization terms to consolidate the model parameters that are important to previous tasks, and (3) dynamic architecture methods ( Rusu et al. ,2016 ;Yoon et al. ,2018 ), which fix trained network architectures in old tasks and dynamically grow branches for new tasks. Lifelong learning is also a hot topic for PLMs. Some target at domain adaptation through continual pre-training ( Gururangan et al. ,2020 ), parameter-efficient adapters ( He et al. ,2021 ) and sparse expert models ( Gururangan et al. ,2021 ). Others focus on the incremental acquisition of factual knowledge that changes over time ( Dhingra et al. ,2021 ;Jang et al. ,2021 ). However, the existing works seldom consider our lifelong learning setting where streaming data from multiple sources is sequentially gathered. Recently, researchers have also conducted a series of empirical studies on the continual learning of PLMs ( Wu et al. ,2021 ;Jin et al. ,2021 ).  \n\nEfficient Pre-training in NLP. Many attempts have been made towards improving the efficiency of pre-training, such as designing novel pretraining tasks ( Clark et al. ,2020 ), model architectures ( Zhang and He ,2020 ), optimization algorithms ( You et al. ,2020 ) and parallel architectures ( Shoeybi et al. ,2019 ;Shazeer et al. ,2018 ). 
Until recently, researchers propose to “back distill” the knowledge from existing PLMs to accelerate large PLMs’ pre-training ( Qin et al. ,2021a ). Another line of work proposes progressive training to dynamically expand an existing PLM’s size through parameter recycling ( Gong et al. ,2019 ;Gu et al. ,2021 ;Chen et al. ,2021 ). However, these methods typically focus on training PLMs on one static corpus, and thus cannot be directly applied to our lifelong pre-training setting.\n\n# 3 Methodology\n\n# 3.1 Preliminaries\nBackground for PLM. consists of an embedding layer and A PLM $\\mathcal{M}$ LTransnerally former ( Vaswani et al. ,2017 ) layers. Given an input $\\mathbf{x}$ consisting of a series of tokens, i.e., $\\mathbf{x}~=~\\{w_{1},\\ldots,w_{|\\mathbf{x}|}\\}$ ,$\\mathcal{M}$ first converts the input into embeddings quentially processed by each Transformer layer $\\{\\mathbf{h}_{1}^{0},...,\\mathbf{h}_{|\\mathbf{x}|}^{0}\\}$ ||, which are seinto contextualized hidden representations $\\mathbf{H}^{l}=$ $\\{\\mathbf{h}_{1}^{l},\\...\\.,\\mathbf{h}_{|\\mathbf{x}|}^{l}\\}$ , where $1\\!\\leq\\!l\\!\\leq\\!L$ .  \n||  \n\n  \nFigure 1: Illustration of ELLE when adapting an existing PLM corpus $\\mathcal{D}_{i}$ . We also visualize the mechanism of width / depth expansion and pre-trained domain prompts. $\\mathcal{M}_{i-1}$ trained on previous data $\\overline{{\\mathcal{D}}}_{i-1}$ to a new  \n\nTask Definition. Assume a stream of corpus $\\overline{{\\mathcal{D}}}_{N}$ from $N$ domains (e.g., news articles, web content and literary works) is sequentially gathered, i.e., $\\overline{{D}}_{N}=\\{D_{1},\\ldots,D_{N}\\}$ whole training process can be partitioned into sev, where $\\mathcal{D}_{i}=\\{\\mathbf{x}_{i}^{j}\\}_{j=1}^{|\\mathcal{D}_{i}|}$ . The eral stages. 
Initially, we have a PLM has been well trained on $\\mathcal{D}_{1}$ , and for the $\\mathcal{M}_{1}$ $i$ -th stage , which Assume in this stage, we only have limited compu$(i\\,>\\,1)$ ), we obtain a new collection of data $\\mathcal{D}_{i}$ .while, we expect the adapted PLM tational resources train the existing PLM edge on forget the previously learned knowledge of $\\mathcal{D}_{i}$ , and obtain a new PL $\\mathcal{R}_{i}$ , our goal is to continually pre$\\mathcal{M}_{i-1}$ to learn new knowlM$\\mathcal{M}_{i}$ sho . MeanD$\\overline{{\\mathcal{D}}}_{i-1}$ −ot .  \n\nOverall Framework. As illustrated in Figure 1 ,starting from data and construct an enlarged PLM its training efficiency. Then we perform function $\\overline{{\\mathcal{D}}}_{i-1}$ , we first expand $\\mathcal{M}_{i-1}$ , which is trained on previous $\\mathcal{M}_{i-1}$ $\\mathcal{M}_{i-1}^{\\mathrm{wD}}$ ’s width and depth −to improve recovering warmup and train knowledge of procedures are dubbed as $\\mathcal{M}_{i-1}$ to obtain function preserved $\\mathcal{M}_{i-1}^{\\mathrm{wD}}$ $\\mathcal{M}_{i-1}^{\\mathrm{WD+}}$ −−to inherit the . The above model expansion $(\\S\\ 3.2)$ . After that, we continually pre-train $\\mathcal{M}_{i-1}^{\\mathrm{WD+}}$ to gain new knowledge on $\\mathcal{D}_{i}$ .To mitigate the catastrophic forgetting on the pre−viously learned knowledge, we employ data-based memory replay on a subset of previously gathered data in the memory, wher $(1\\leq k\\leq i-1)$ $\\bar{\\overline{{D}}}_{i-1}^{s u\\bar{b}}\\,=\\,\\{\\mathcal{D}_{1}^{s u b},\\ldots,\\bar{\\mathcal{D}}_{i-1}^{s u b}\\}$ −and {D B$\\mathcal{D}_{k}^{s u b}=\\{x_{k}^{1},\\ldots,x_{k}^{B}\\}\\in\\mathcal{D}_{k}$ is the constrained memory D−}conserved size for each domain. 
To help PLMs disentangle the knowledge during pre-training and also stimulate the needed knowledge for each downstream task, we implant domain prompts into PLMs during the whole training process $\\left(\\S\\ 3.3\\right)$ .\n\n# 3.2 Function Preserved Model Expansion\nTo accumulate knowledge more efficiently, each time when a new corpus both sample efficiency and fast convergence brought by $\\mathcal{M}_{i-1}$ ’s width and depth to attain the superior $\\mathcal{D}_{i}$ comes, we expand larger model capacity ( Li et al. ,2020 ).  \n\nWidth Expansion. For width expansion, we borrow the function preserving initialization (FPI) from Chen et al. (2021 ). For a brief introduction, FPI expands the matrices of all modules of a Transformer layer to arbitrary larger sizes and constructs an enlarged PLM the corresponding matrices of the original through parameter replication. For example, as vi$\\mathcal{M}_{i-1}^{\\sf w}$ −.$\\mathcal{M}_{i-1}^{\\sf W}$ −is initialized using $\\mathcal{M}_{i-1}$ sualized in Figure 1 , the core principle of FPI is to divide the product of $O\\times x_{1}$ into multiple partitions, e.g. 
${\\textstyle{\\frac{o}{2}}}\\times x_{1}+{\\textstyle{\\frac{o}{2}}}\\times x_{1}$ 2 ×.ally, FPI expands a ma$W^{\\prime}\\in\\mathbb{R}^{(h_{1}+\\Delta_{h_{1}})\\times h_{2}}$ $\\bar{\\b{W}}\\in\\mathbb{R}^{h_{1}\\bar{\\times}h_{2}}$ ∈M$\\mathcal{M}_{i-1}$ of $\\mathcal{M}_{i-1}^{\\sf W}$ −to an enlarged matrix as follows:  \n\n$$\n\\begin{array}{r l}&{m(i)=\\displaystyle\\left\\{i\\!\\!\\begin{array}{r l}&{i\\in[1,h_{1}]}\\\\ {U(\\{1,\\dots,h_{1}\\})}&{i\\in(h_{1},h_{1}+\\Delta_{h_{1}}],}\\end{array}\\!\\!\\right.}\\\\ &{\\quad\\quad C_{i}=\\displaystyle\\sum_{i^{\\prime}=1}^{h_{1}+\\Delta_{h_{1}}}\\mathbb{I}(m(i^{\\prime})=m(i)),}\\\\ &{W_{(i,*)}^{\\prime}=\\displaystyle\\frac{1}{C_{i}}\\cdot W_{(m(i),*)}+\\mathbb{I}(C_{i}>1)\\cdot\\delta_{i},}\\end{array}\n$$  \n\nwhere $U(\\cdot)$ denotes a uniform sampling function, $m(\\cdot)$ denotes the mapping function between two matrices, $\\mathbb{I}(\\cdot)$ is an indicator function, $C_{i}$ counts how many partitions a specific neuron is splitted and $\\pmb{\\delta}_{i}\\,\\in\\,\\mathbb{R}^{h_{2}}$ is om g n noise. FPI ensures that both imately the same functionality, i.e., both models M$\\mathcal{M}_{i-1}^{\\sf w}$ −and M$\\mathcal{M}_{i-1}$ −have approxhave almost the same output given the same input. Besides function preservation, the initialized model could serve as a good starting point for further optimization. We refer readers to Chen et al. (2021 )for more details about width expansion. Different from Chen et al. (2021 ), we additionally introduce random noises $\\delta_{i}$ into the newly copied parameters of $W^{\\prime}$ during initialization. These slight noises would break the symmetry after the replication and accelerate later pre-training.  \n\nDepth Expansion. For depth expansion, previous works generally resort to stacking all the original PLM layers into $2\\times$ layers through parameter replication ( Gong et al. ,2019 ). Such initialization is demonstrated to improve training efficiency.  
\n\nHowever, the above layer stacking method restricts the number of layers of the enlarged PLM $\\mathcal{M}_{i-1}^{\\mathrm{D}}$ to be integer multiples of that of the original PLM −$\\mathcal{M}_{i-1}$ , which is not flexible for practical uses. To improve the expansion flexibility so that $\\mathcal{M}_{i-1}$ could be expanded with arbitrary number of layers, we propose a novel layer insertion method to con$1\\leq L^{\\prime}\\leq L$ struct a new PLM . Specifically, we randomly select $\\mathcal{M}_{i-1}^{\\mathrm{D}}$ −with $L+L^{\\prime}$ layers, wh $L^{\\prime}$ layers from and insert the replication layer right before / after $\\mathcal{M}_{i-1}$ , copy each layer’s parameters the original layer. We found empirically that inserting the copied layer into other positions would cause a performance drop, and the reason is that it will violate the processing order of the original layer sequence and break the PLM’s original functionality. At each expansion stage when new data comes, since different layers have different functionalities, we always choose those layers that have not been copied before to help PLMs develop in an all-around way, instead of just developing a certain kind of functionality. Since both width expansion and depth expansion are compatible with each other, we simultaneously expand both of them to $\\mathcal{M}_{i-1}$ construct an enlarged model ’s knowledge contained in the parameters. $\\bar{\\mathcal{M}}_{i-1}^{\\mathrm{WD}}$ −, which inherits Function Recovering Warmup. Since the above model expansion cannot ensure exact function preservation and inevitably results in functionality loss and performance drops, we pre-train the initialized PLM previous corpora to recover the language abilities lost during model $\\overline{{\\mathcal{D}}}_{i-1}^{s u b}$ −conserved in the memory $\\mathcal{M}_{i-1}^{\\mathrm{wD}}$ −on the expansion, which is dubbed as function recovering warmup (FRW). 
After the warmup, we obtain $\\mathcal{M}_{i-1}^{\\mathrm{WD+}}$ , which successfully inherits the knowledge from −$\\mathcal{M}_{i-1}$ and is also well-prepared for the next  \n\ntraining stage.',
                          'paper_title': 'ELLE: Efficient Lifelong Pre-training for Emerging Data',
                          'source_name': 'ACL', 'year': '2022', 'chunk_id': 1,
                          'chunk_ext_id': 454896046152090126},
                      {
                          'chunk_content': '# 5. Related Work\n\n# 5.1. Vision-and-language Navigation\nIn navigation-only VLN datasets such as R2R [ 2 ], models that generate the action sequence end-to-end with a Transformer model can already achieve a good performance [ 25 ,35 ]. Recent work [ 11 ,16 ,21 ,23 ] employs BERT and its variants [ 7 ,20 ] to get better language understanding. These models jointly learn the linguistic and visual representations with cross-attention for grounding.  \n\nHowever, in more complex VLN, or embodied instruction following, datasets such as ALFRED [ 32 ], hierarchical planning models [ 3 ,17 ,24 ] that separate the high-level and low-level planning have proven to be most effective. These models use pretrained BERT models to generate high-level plans and construct a semantic map to guide the agent to find the target objects specified in the high-level plan.  \n\nRecent work has shown that hierarchical planning models are advantageous in the low-data regime. $\\mathrm{(SL)}^{3}$ [31 ] uses $10\\%$ of ALFRED’s training data to learn how to generate natural language subtasks and then match primitive actions to each subtask. We take this modular approach one step further and propose to use large language models (LLMs) under the few-shot setting. More discussion of $({\\mathrm{SL}})^{3}$ is in Appendix A .\n\n# 5.2. General In-context Learning and Prompting\nIn-context learning [ 4 ] uses a small number of examples to prompt an LLM without requiring fine-tuning for different tasks. Dynamic retrieval of in-context examples has been shown to be critical [ 18 ], which we adopt in our design and further confirm its effectiveness in our setting. Compared with general in-context learning, a unique challenge for embodied agent planning is grouding to the current environment. We propose a simple but effective grounded replanning algorithm to do that.  
\n\nStudies have shown that LLMs are sensitive to the prompt design, especially in the few-shot or zero-shot setting [ 14 ,26 ]. As part of the contribution of this work, we carefully validate different design choices for the prompt under the true few-shot setting. The resulted prompt is potentially useful for more advanced LLM-based planners in the future.\n\n# 5.3. Prompting for VLN\nThe use of LLMs for decision making has been an increasingly popular topic for research. We highlight recent relevant studies and their differences with the current work.  \n\nPROGPROMPT [ 33 ] proposes a program-like prompt to use LLMs ( e.g ., GPT-3 [ 4 ] and Codex [ 5 ]) to generate an executable plan for robotic agents. However, it only generates a static plan and also assumes ground-truth objects and affordances from the environment. The complex prompt design also limits the number of in-context examples that can fit in the context. We do not make such assumptions, and we propose a more simple but effective prompt design. LLM-Planner can also perform grounded re-planning during the execution of a task. Furthermore, PROGPROMPT is mainly evaluated on a synthetic dataset with relatively short trajectories. In contrast, LLM-Planner can be easily integrated with existing embodied agents and we show that it can achieve competitive end-to-end performance on the standard ALFRED benchmark with long-horizon tasks (on average 50 actions). LM-Nav [ 30 ] prompts LLMs with raw navigation instructions and 3 in-context examples to generate a list of landmarks for a vision-language model to infer a joint probability distribution over landmarks and images. Language planner [ 13 ] asks LLMs to generate a free-form instruction given a prompt with an in-context example and a goal. A pretrained BERT-like model then translate the freeform instruction into an standardized action names, which is later appended to the prompt for future planning. 
However, LLM-Planner can perform high-level planning in a single step and does not require a pre-trained translation model to standardize action names. Saycan [ 1 ] uses an LLM to score and rank a list of pre-defined admissible actions, which is then combined with an affordance function which assigns higher weights to the objects appearing in the current scene. PLANner [ 22 ] uses commonsense-infused prompts to generate a static HLP and matches it to the closest admissible action using a Sentence-Transformer [ 28 ].  \n\nExisting works use LLMs as a static generator for HLP [ 12 ,13 ,22 ,30 ,33 ,40 ], use LLMs as a ranker that ranks all admissible actions in the current environment [ 1 ,22 ], or add ground-truth environmental input to the LLM prompt [ 33 ]. In contrast, LLM-Planner is able to ground the LLM to the current environment by only using a pretrained vision model. Furthermore, it can directly predict HLP without relying on a list of admissible actions in the current environment. At last, LLM-Planner can perform the aforementioned capabilities while re-planning during the task execution to dynamically adapt the high-level plans to the current environment. With careful prompt design and other techniques for better in-context learning, we show that LLM-Planner can generate complete and high-quality highlevel plans that are grounded in the current environment with a fraction of labeled data.',
                          'paper_title': 'LLM-Planner: Few-Shot Grounded Planning for Embodied Agents with Large Language Models',
                          'source_name': 'ICCV', 'year': '2023', 'chunk_id': 7,
                          'chunk_ext_id': 454848411301477816},
                      {
                          'chunk_content': '# 2. Preliminaries and Related Work\n\n# 2.1. Multi-modal Large Language Models\nWith the development of Large Language Models (LLMs) like the GPT series [ 2 ,26 ,28 ], as well as open-source LLMs such as the LLaMA series [ 31 ,32 ] and Vicuna [ 5 ], Multimodal Large Language Models (MLLMs) have emerged. Examples of such MLLMs include LLaVA [ 20 ], InstructBLIP [ 6 ], and LAMM [ 38 ], among others [ 4 ,10 ,27 ,37 ,40 ]. In this work, we introduce an MLLM called MineLLM, which is specifically designed and trained for Minecraft, and leverage its perception, interaction, and analysis capabilities to build up Percipient for MP5 , and further enable an objective-conditioned active perception scheme.\n\n# 2.2. Agents in Minecraft\nPrevious works[ 3 ,7 ,9 ,11 ,18 ,21 ,39 ] attempt to use approaches such as hierarchical RL, goal-based RL, and reward shaping to train an agent in Minecraft. MineCLIP [ 9 ]enables the resolution of various open-ended tasks specified in free language, even without any manually designed dense rewards. DreamerV3 [ 12 ] succeeds in training agents in Minecraft with a learned world model. VPT [ 1 ] builds a foundation model for Minecraft by learning from massive videos. Based on VPT, Steve-1 [ 17 ] also explores bringing in MineCLIP [ 9 ] to get an instruction following policy with high performance. The development of recent large language model-related work Voyager [ 33 ], DEPS [ 35 ], GITM [ 41 ]further promote the advancement of agents in long-horizon tasks. These works use pre-trained large language models as the zero-shot planners[ 14 ] for agents, leveraging the powerful reasoning capabilities of large language models to obtain continuous operation instructions or executable policy lists.  \n\nWe take advantage of the reasoning capability of LLM to build up our own agent. 
Existing LLM agents [ 41 ,41 ] in Minecraft feed scene data from simulation platforms [ 9 ,11 ]into large language models for task planning. However, for embodied agents in real scenes, it is clearly unrealistic to use accurate scene data directly. Therefore, agents need to be robust to make decision corrections despite inaccurate or erroneous perception information. Moreover, open-ended tasks need hierarchical reasoning [ 21 ] and complex openended context understanding [ 1 ,9 ], classical perception networks can only output fixed perception results and cannot provide corresponding perception information according to the task, making it impossible to understand open-ended scenarios. Therefore, we design MP5 , an embodied agent with open-ended capabilities that can solve the problem of open-ended tasks.  \n\n  \nFigure 2. Overview of module interaction in MP5 . After receiving the task instruction, MP5 first utilizes Parser to generate a sub-objective list. Once a sub-objective is passed to the Planner, the Planner Obtaining Env. Info. for Perception-aware Planning. The performer takes frequently Perception-aware Execution to interact with the environment by interacting with the Patroller. Both Perception-aware Planning and Execution rely on the Active Perception between the Percipient and the Patroller. Once there are execution failures, the Planner will re-schedule the action sequence of the current sub-objective. Mechanisms for collaboration and inspection of multiple modules guarantee the correctness and robustness when MP5 is solving an open-ended embodied task.\n\n# 3. Method\nIn this section, we first give an overview of our proposed $M P5$ , for solving context-dependent and process-dependent tasks in an open-world and embodied environment, such as Minecraft (Sec. 3.1 ). Next, we elaborate on how to implement an active perception scheme (Sec. 3.2 ). 
This scheme plays a vital role in $M P5$ to solve context-dependent tasks, since it reliably grounds the visual content according to different kinds of objectives, and thus strengthens the planning skill and execution correctness with respect to contextdependent tasks. Then, we show how to plan and update action sequences in awareness of the situations, and how to reliably execute these actions in an embodied environment (Sec. 3.3 ). Finally, we give necessary implementation details about MP5 in Sec. 3.4 .\n\n# 3.1. Overview\nAs demonstrated in Fig. 2 , our MP5 includes five major modules, i.e ., Parser, Percipient, Planner, Performer, and Patroller. To be specific, Percipient is a parameter-efficiently fine-tuned Multimodal Large Language Model (MLLM) that is specified to the Minecraft environment. The Parser, Planner, and Patroller are pre-trained Large-language Models (LLMs). We also include retrieval-augmented generation (RAG) to enhance the quality of responses generated by Parser and Planner. Performer is an interface that explains each action from the action sequence into executable commands that directly control the game character.  \n\nWhy can MP5 solve context-dependent and processdependent tasks? MP5 includes an active perception scheme by means of multi-round interactions between Percipient and Patroller, which is to actively perceive the environmental information in the observed images, with respect to various objectives raised by Planner or Performer. 
With the help of this scheme, Planner can schedule or update action sequences in awareness of the observed images, inventory status and etc ., resulting in a situation-aware planning ;Performer can execute actions that are adapted to the embodied environment, resulting in a embodied action execution .Patroller in this scheme can also feedback on better choices of plans/actions based on the visual evidence so that the process-dependent tasks are solved with fewer chances of context-dependent execution failures. Moreover, Percipient can understand open-ended visual concepts, therefore it allows MP5 to solve tasks that are never seen before. Based on these merits, MP5 is able to solve context-dependent and process-dependent tasks in an open-world embodied environment.  \n\n  \nFigure 3. A demonstration of the process of Active Perception scheme. Temporary Env. Info. Set saves information collected in the current scenario, so it should be reset at the beginning of Active Perception scheme. Performer then invokes Patroller to start asking Percipient questions with respect to the description of the sub-objective and the current execution action round by round. The responses of Percipient are saved in Temporary Env. Info. Set and are also gathered as the context for the next question-answering round. After finishing asking all significant necessary questions, Patroller will check whether the current execution action is complete by analyzing the current sub-objective with Perceived env info. saved in Temporary Env. Info. Set, therefore complex Context-Dependent Tasks could be solved smoothly.  \n\nHow does MP5 function? In Fig. 2 , upon receiving a high-level task, MP5 first utilizes the Parser to generate a sequence of short-horizon sub-objectives, as a list of rich instructions in natural languages. The feasibility of the generated sub-objectives is augmented by retrieving an external Knowledge Memory ( e.g ., online wiki, user tips, and etc .). 
To one sub-objective, Planner schedules the action sequence that is grounded by the environmental information gathered by the active perception scheme. In this case, Performer will execute the actual actions by explaining the action sequence that is adapted to the embodied environment, via frequent interaction with the active perception scheme. Once there are execution failures (determined by Patroller), Planner will re-schedule the action sequence of the current sub-objective, or even update the following sub-objectives if some necessary sub-objectives are missing. Otherwise, the agent will go to the next sub-objective and schedule new action sequences, whilst the successful action sequence of the current subobjective will be stored in the external memory of Planner (called Performer Memory), along with the agent situation when it was planned. In the end, the agent will stop when the final sub-objective of the task has been reached.',
                          'paper_title': 'MP5: A Multi-modal Open-ended Embodied System in Minecraft Via Active Perception',
                          'source_name': 'CVPR', 'year': '2024', 'chunk_id': 1,
                          'chunk_ext_id': 454849252850590444},
                      {
                          'chunk_content': '# 1 I NTRODUCTION\nIn recent years, the field of robot learning has witnessed a significant transformation with the emergence of Large Language Models (LLMs) as a mechanism for injecting internet-scale knowledge into robotics. One paradigm that has been particularly effective is LLM planning over a predefined set of skills ( Ahn et al. ,2022 ;Singh et al. ,2023 ;Huang et al. ,2022b ;Wu et al. ,2023 ), producing strong results across a wide range of robotics tasks. These works assume the availability of a pre-defined skill library that abstracts away the robotic control problem. They instead focus on designing methods to select the right sequence skills to solve a given task. However, for robotics tasks involving contactrich robotic manipulation (Fig. 1 ), such skills are often not available, require significant engineering effort to design or train a-priori or are simply not expressive enough to address the task. How can we move beyond pre-built skill libraries and enable the application of language models to general purpose robotics tasks with as few assumptions as possible? Robotic systems need to be capable of online improvement over low-level control policies while being able to plan over long horizons.  \n\nEnd-to-end reinforcement learning (RL) is one paradigm that can produce complex low-level control strategies on robots with minimal assumptions ( Akkaya et al. ,2019 ;Herzog\\* et al. ,2023 ;Handa et al. ,2022 ;Kalashnikov et al. ,2018 ;2021 ;Chen et al. ,2022 ;Agarwal et al. ,2023 ). Unlike hierarchical approaches which impose a specific structure on the agent which may not be applicable to all tasks, end-to-end learning methods can, in principle, learn a better representation directly from data. However, RL methods are traditionally limited to the short horizon regime due to the significant challenge of exploration in RL, especially in high-dimensional continuous action spaces characteristic of robotics tasks. 
RL methods struggle with longer-horizon tasks in which high-level reasoning and low-level control must be learned simultaneously; effectively decomposing tasks into sub-sequences and accurately achieving them is challenging in general ( Sutton et al. ,1999 ;Parr & Russell ,1997 ).  \n\nOur key insight is that LLMs and RL have complementary strengths and weaknesses. Prior work ( Ahn et al. ,2022 ;Huang et al. ,2022a ;Wu et al. ,2023 ;Singh et al. ,2023 ;Song et al. ,2023 ) has shown that when appropriately prompted, language models are capable of leveraging internet scale knowledge to break down long-horizon tasks into achievable sub-goals, but lack a mechanism to produce low-level robot control strategies Wang et al. (2023 ), while RL can discover complex control behaviors on robots but struggles to simultaneously perform long-term reasoning ( Nachum et al. ,2018 ). However, directly combining the two paradigms, for example, via training a language conditioned policy to solve a new task, does not address the exploration problem. The RL agent must now simultaneously learn language semantics and low-level control. Ideally, the RL agent should be able to follow the guidance of the LLM, enabling it to learn to efficiently solve each predicted sub-task online. How can we connect the abstract language space of an LLM with the low-level control space of the RL agent in order to address the long-horizon robot control problem? In this work, we propose a learning method to solve long-horizon robotics tasks by tracking language model plans using motion planning and learned low-level control. Our approach, called PlanSeqLearn (PSL), is a modular framework in which a high-level language plan given by an LLM ( Plan ) is interpreted and executed using motion planning ( Seq ), enabling the RL policy ( Learn ) to rapidly learn short-horizon control strategies to solve the overall task. 
This decomposition enables us to effectively leverage the complementary strengths of each module: language models for abstract planning, vision-based motion planning for task plan tracking as well as achieving robot states and RL policies for learning low-level control. Furthermore, we improve learning speed and training stability by sharing the learned RL policy across all stages of the task, using local observations for efficient generalization, and introducing a simple, yet scalable curriculum learning strategy for tracking the language model plan. To our knowledge, ours is the first work enabling language guided RL agents to efficiently learn low-level control strategies for long-horizon robotics tasks.  \n\n  \nFigure 1: Long horizon task visualization. We visualize PSL solving the NutAssembly task, in which the goal is to put both nuts on their respective pegs. After predicting the high-level plan using an LLM, PSL computes a target robot pose, achieves it using motion planning and then learns interaction via RL ( third row ).  \n\nOur contributions are: 1) A novel method for long-horizon robot learning that tightly integrates large language models for high-level planning, motion planning for skill sequencing and RL for learning low-level robot control strategies; 2) Strategies for efficient policy learning from highlevel plans, which include policy observation space design for locality, shared policy network and reward function structures, and curricula for stage-wise policy training; 3) An extensive experimental evaluation demonstrating that PSL can solve over 25 long-horizon robotics tasks with up to 10 stages, outperforming SOTA baselines across four benchmark suites at success rates of over ${\\bf85}\\%$ purely from visual input. PSL produces agents that solve challenging long-horizon tasks such as NutAssembly at $96\\%$ success rate.',
                          'paper_title': 'Plan-Seq-Learn: Language Model Guided RL for Solving Long Horizon Robotics Tasks',
                          'source_name': 'ICLR', 'year': '2024', 'chunk_id': 1,
                          'chunk_ext_id': 454895466169250874},
                      {
                          'chunk_content': '# 4 PRE -TRAINING & F INE -TUNING LLM S FOR REC -OMMENDER SYSTEMS\nIn general, there are three key steps in developing and deploying LLMs in recommendation tasks, namely, pretraining, fine-tuning, and prompting. In this section, we first introduce the pre-training and fine-tuning paradigms, which are shown in Figure 3 and Figure 4, respectively. More specifically, we will focus on the specific pre-training tasks applied in LLMs for recommender systems and finetuning strategies for better performance in downstream recommendation tasks. Note that the works mentioned below are summarized in Table 1 and Table 2.  \n\nTable 1: Pre-training methods for LLM-empowered RecSys.   \n\n\n<html><body><table><tr><td>Paradigms</td><td>Methods</td><td>Pre-training tasks</td></tr><tr><td rowspan="5">Pre-training</td><td rowspan="2">PTUM [69]</td><td>MaskedBehaviorPrediction</td></tr><tr><td>NextKBehavior Prediction</td></tr><tr><td rowspan="2">M6 [58]</td><td>Text-infilling</td></tr><tr><td>Auto-regressive Generation</td></tr><tr><td>P5 [60]</td><td>Multi-taskModeling</td></tr></table></body></html>\n\n# 4.1 Pre-training Paradigm for Recommender Systems\nPre-training is an important step in developing LLMs. It involves training LLMs on a vast amount of corpus consisting of diverse and unlabeled data. This strategy enables LLMs to acquire a broad understanding of various linguistic aspects, including grammar, syntax, semantics, and even common sense reasoning. Through pre-training, LLMs can learn to recognize and generate coherent and  \n\n  \n\nFigure 3: An illustration of two main pre-training methods of LLMs: Masked Language Modeling (left) which randomly masks tokens or spans in the sequence and requires LLMs to generate the masked tokens or spans based on the remaining context, and Next Token Prediction (right) which requires prediction for the next token based on the given context. 
In pre-training, LLMs are trained on a vast amount of corpus consisting of diverse and unlabeled data.  \n\n  \n\nFigure 4: An illustration of two main fine-tuning methods of LLMs: Full-model Fine-tuning (left) which involves changing the entire model weights, and Parameter-efficient Fine-tuning (right) which involves fine-tuning a small proportion of model weights or a few extra trainable weights while fixing most of the parameters in LLMs. In fine-tuning, LLMs are trained on a relatively small amount of corpus ( i.e., compared to the amount of corpus for pre-training) of task-specific data.  \n\ncontextually appropriate responses. In general, there are two main methods to pre-train LLMs in the natural language domain, depending on the adopted model structure. One is Masked Language Modeling (MLM) for encoder-only or encoder-decoder Transformer structures, which randomly masks tokens or spans in the sequence and requires LLMs to generate the masked tokens or spans based on the remaining context [70]. The other is Next Token Prediction (NTP) for decoder-only Transformer structures, which requires prediction for the next token based on the given context [40].  \n\nIn the context of recommender systems, most of the existing works follow the two classical pre-training strategies. Next, we will introduce representative methods. PTUM [69] proposes two similar pre-training tasks, Masked Behavior Prediction (MBP) and Next K behavior Prediction (NBP), to model user behaviors in recommender systems. Unlike language tokens, user behaviors are more diverse and thus more difficult to be predicted. In this case, instead of masking a span of tokens, PTUM only masks a single user behavior with the goal of predicting the masked behavior based on the other behaviors in the interaction sequence of the target user. On the other side, NBP models the relevance between past and future behaviors, which is crucial for user modeling. 
The goal of NBP is to predict the next $k$ behaviors based on the user-item interaction history.  \n\nM6 [58] also adopts two pre-training objectives motivated by the two classical pre-training tasks, namely a textinfilling objective and an auto-regressive language generation objective, corresponding to the above two pre-training tasks, respectively. To be more specific, the text-infilling objective exhibits the pre-training task of BART [71], which randomly masks a span with several tokens in the text sequence and predicts these masked spans as the pre-training target, providing the capability to assess the plausibility of a text or an event in the recommendation scoring tasks. Meanwhile, the auto-regressive language generation objective follows the Next Token Prediction task in natural language pre-training, but it is slightly different as it predicts the unmasked sentence based on the masked sequence.  \n\nAdditionally, P5 adopts multi-mask modeling and mixes datasets of various recommendation tasks for pre-training. In this case, it can be generalized to various recommendation tasks and even unseen tasks with zero-shot generation ability [60]. Across different recommendation tasks, P5 applies a unified indexing method for representing users and items in language sequence as stated in Section 3 so that the Masked Language Modelling task could be employed.\n\n# 4.2 Fine-tuning Paradigm for Recommender Systems\nFine-tuning is a crucial step in deploying pre-trained LLMs for specific downstream tasks. Especially for recommendation tasks, LLMs require fine-tuning to grasp more domain knowledge. Particularly, fine-tuning paradigm involves training the pre-trained model based on task-specific recommendation datasets that include user-item interaction behaviors ( e.g. , purchase, click, ratings) and side knowledge about users and items ( e.g. , users’ social relations and items’ descriptions). 
This process allows the model to specialize its knowledge and parameters to improve performance in the recommendation domain. In general, fine-tuning strategies can be divided into two categories according to the proportion of model weights changed to fit the given task. One is full-model fine-tuning, which changes the entire model weights in the fine-tuning process. By considering the computation cost, the other is parameter-efficient fine-tuning, which aims to change only a small part of weights or develop trainable adapters to fit specific tasks.  \n\nTable 2: Fine-tuning methods applied in LLM-empowered RecSys.   \n\n\n<html><body><table><tr><td>Paradigms</td><td>Methods</td><td>References</td></tr><tr><td rowspan="2">Fine-tuning</td><td>Full-model Fine-tuning</td><td>[72], [9]]]] and [77]</td></tr><tr><td>Parameter-efficient Fine-tuning</td><td>[57] and [58]</td></tr></table></body></html>',
                          'paper_title': 'Recommender Systems in the Era of Large Language Models (Llms)',
                          'source_name': 'IEEE Transactions on Knowledge and Data Engineering', 'year': None,
                          'chunk_id': 4, 'chunk_ext_id': 454845782856070726},
                      {
                          'chunk_content': '# QUANTIFYING LANGUAGE MODELS ’ S ENSITIVITY TO SPURIOUS FEATURES IN PROMPT DESIGN or: How I learned to start worrying about prompt formatting\nMelanie Sclar 1 Yejin Choi Yulia Tsvetkov 1 Alane Suhr 3   \n1 Paul G. Allen School of Computer Science & Engineering, University of Washington   \n2 Allen Institute for Artificial Intelligence   \n3 University of California, Berkeley\n\n# A BSTRACT\nAs large language models (LLMs) are adopted as a fundamental component of language technologies, it is crucial to accurately characterize their performance. Because choices in prompt design can strongly influence model behavior, this design process is critical in effectively using any modern pre-trained generative language model. In this work, we focus on LLM sensitivity to a quintessential class of meaning-preserving design choices: prompt formatting. We find that several widely used open-source LLMs are extremely sensitive to subtle changes in prompt formatting in few-shot settings, with performance differences of up to 76 accuracy points when evaluated using LLaMA-2-13B. Sensitivity remains even when increasing model size, the number of few-shot examples, or performing instruction tuning. Our analysis suggests that work evaluating LLMs with prompting-based methods would benefit from reporting a range of performance across plausible prompt formats, instead of the currently-standard practice of reporting performance on a single format. We also show that format performance only weakly correlates between models, which puts into question the methodological validity of comparing models with an arbitrarily chosen, fixed prompt format. 
To facilitate systematic analysis we propose F ORMAT SPREAD , an algorithm that rapidly evaluates a sampled set of plausible prompt formats for a given task, and reports the interval of expected performance without accessing model weights 1 .Furthermore, we present a suite of analyses that characterize the nature of this sensitivity, including exploring the influence of particular atomic perturbations and the internal representation of particular formats.\n\n# 1 I NTRODUCTION\nAs the capabilities of LLMs have rapidly improved, their sensitivity to input prompt features has been used to optimize performance via prompt engineering (White et al., 2023). However, there has been little work in characterizing this sensitivity, especially to seemingly innocuous feature choices that preserve prompt meaning and intent. In this work, we analyze the sensitivity of widely used, open-source LLMs to a class of features that should not influence a prompt’s interpretation: formatting choices. We find that pre-trained LLMs are sensitive to these choices in unpredictable ways, with accuracy varying in up to 76 points for LLaMA-2-13B between equivalent formats, and ${\\sim}10$ accuracy points on average across ${50+}$ tasks and several models. We also show that this variance is not eliminated by adding few-shot examples, increasing model size, or instruction tuning.  \n\nDesigning prompt templates is a critical part of effectively using a pre-trained language model. This design process includes making choices about wording, choosing few-shot examples for in-context learning, and making decisions about seemingly trivial features like formatting. This process, and often even the resulting templates, is rarely reported or discussed in research papers, under the assumption that performance variance across these choices is insignificant compared to variance across data points or models. 
However, some anecdotal evidence points to formatting choices actually having a significant influence on model behavior (Aghajanyan, 2023). In some cases, researchers report a limited number of manually generated formats to show that scaling trends hold despite performance being significantly different (Schick et al., 2021). The assumption that formatting does not influence overall model performance may become problematic when improvements over existing approaches are attributed to the amount and source of training data, number of parameters, or model architecture, without also accounting for changes in prompt format. Ignoring variance across formats may also negatively affect user experience, e.g. if users inadvertently choose formats the LLM does not perform well on.  \n\n  \nFigure 1: Slight modifications in prompt format templating may lead to significantly different model performance for a given task. Each <text> represents a different variable-length placeholder to be replaced with actual data samples. Example shown corresponds to 1-shot LLaMA-2-7B performances for task280 from SuperNaturalInstructions (Wang et al., 2022). This StereoSet-inspired task (Nadeem et al., 2021) requires the model to, given a short passage, classify it into one of four types of stereotype or anti-stereotype (gender, profession, race, and religion).  \n\nOur proposed tool, F ORMAT SPREAD , enables a systematic analysis of these variances across a wide set of semantically equivalent prompt formats within a user-specified computational budget. We find that choices in formatting few-shot examples during in-context learning introduce spurious biases that may lead to significantly different conclusions in model performance. 
The sensitivity to formatting choices that we discover across widely-used, open-source models suggests that future research would benefit from reporting a performance spread over a sufficient sample of plausible formats, instead of simply reporting the formatting used and its performance, as is currently standard. Moreover, we argue that this reporting is crucial when comparing the performance of different models, as we show the influence of formatting choices only weakly correlates between models, thus making and fixing a formatting choice could introduce a significant confounding factor.  \n\nFully exploring the space of prompt formats is intractable, as computation costs scale linearly with the number of formats considered. F ORMAT SPREAD efficiently explores the space of prompt formats under a user-specified computational budget using Bayesian optimization. F ORMAT SPREAD does not require access to the model weights, allowing its use on API-gated models: we find a spread up to 56 accuracy points with a median spread of 6.4 accuracy points with GPT3.5 across 320 formats and 53 tasks at a cost of under 10USD on average per task. Beyond facilitating evaluation, we also propose a suite of analyses to further characterize model sensitivity to formatting. Among other results, we show that the separability of continuous prompt embeddings correlates with the spread observed in task performance.',
                          'paper_title': "Quantifying Language Models' Sensitivity to Spurious Features in Prompt Design Or: How I Learned to Start Worrying about Prompt Formatting",
                          'source_name': 'ICLR', 'year': '2024', 'chunk_id': 0,
                          'chunk_ext_id': 454845939362342786},
                      {
                          'chunk_content': '# 6 Implementations\n6.1 Model architectures . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 14   \n6.2 Pipeline scheduling . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 14   \n6.3 Inference service . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 14\n\n# 7 Experiments\n7.1 Training . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 15   \n7.2 Inference . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 16  \n\n8 Conclusions 20\n#\n20   \nA.1 Methodology . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 21   \nA.2 Analysis . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 21\n\n# 1 Introduction\nLarge language models (LLMs) has amazed the world with their astonishing abilities and performance in solving a wide range of problems [ 4 ,29 ,7 ,56 ,46 ,47 ]. This is accompanied by excessive costs and carbon emissions for training and deploying these models, as their sizes grow rapidly in recent years. In general, costs for inference are dominant in the long run, as each model will be deployed to solve many problems for a long period of time. This has inspired researchers and engineers to develop various approaches for accelerating LLM inference.  \n\nThe focus of this work is early exiting , which accelerates inference by allowing a deep neural network to make predictions and exit early in the network for certain inputs, without running the forward pass through the full network. This is achieved by augmenting a standard neural network architecture (with a single exit at the end) with additional early-exit layers that transform intermediate hidden states into early outputs. 
The early-exit model architecture, as visualized in Figure 1 , not only retains the full capacity of a large model, but is also capable of adaptively using a smaller amount of computation for solving simpler problems. The idea of early exiting is a natural analogue of how human speaks, thinks, and make decisions: not every problem requires or deserves the same amount of consideration, and one shall opt for fast reaction to simple problems without overthinking. Early exiting has been an active research area and widely applied in natural language processing, computer vision, and other areas [ 13 ,24 ,15 ,59 ,11 ,38 ,51 ,37 ,52 ,45 ,16 ,18 ,14 ,53 ]. More recently, it starts to gain attention in the LLM domain [ 37 ,9 ,2 ,48 ], and is recognized as a promising direction for further reducing the latency and costs of LLM inference [ 31 ].\n\n# 1.1 Goal and motivations\nThe primary goal of this work is to build the infrastructure for scaling up training and inference of earlyexit LLMs. This is motivated by the observation that the sizes of early-exit models in prior works are still relatively small. The largest early-exit LLM that we are aware of is an OPT model [ 56 ] with 6.7 billion (6.7B) parameters [ 9 ]; in contrast, standard LLMs at much larger scales, e.g. the 175B GPT-3 [ 4 ], 530B Megatron-Turing NLG [ 42 ], 540B PaLM [ 7 ], or even larger sparsely activated models, have been well trained and deployed in many applications. It is an urgent need for the community to truly understand the efficacy of early exiting for LLMs at larger scales, which is indispensable towards making early exiting a useful and practical option in complex scenarios that only sufficiently large LLMs can handle.',
                          'paper_title': 'EE-LLM: Large-Scale Training and Inference of Early-Exit Large Language Models with 3D Parallelism',
                          'source_name': 'ICML', 'year': '2024', 'chunk_id': 1,
                          'chunk_ext_id': 454895298406254068},
                      {
                          'chunk_content': '# II. R ELATED WORK\nIn this section, we provide a literature review pertaining to Large Language Models (LLMs), Multi-modal Large Language Models (MLLMs), and LLMs for Sequential Recommendation. Our work draws inspiration from them for the fusion of LLMs and sequential recommendation systems.\n\n# A. Large Language Models\nLanguage modeling has been extensively scrutinized for language understanding and generation over the past years, thereby catalyzing the recent emergence of Language Models (LMs) [ 7 ], [ 9 ], [ 29 ]–[ 31 ]. Pretrained LMs built on the Transformer architecture, such as BERT [ 31 ] and T5 [ 30 ], have demonstrated profound versatility owing to their largescale training corpus. More recently, researchers have delved deeper into the scaling effect by augmenting the parameter and training corpus scale to an unprecedented magnitude — encompassing billions of parameters and trillions of training tokens [ 7 ]–[ 9 ], [ 27 ], [ 29 ], [ 32 ]. These LLMs manifest substantial performance enhancements and display unique capabilities, such as common sense reasoning and instruction following. Moreover, domain-specific LLMs, such as those in the domain of finance [ 33 ], medicine [ 34 ], and law [ 35 ], are constructed by integrating domain expertise with the commonsense knowledge inherent in general LLMs. These advancements inspire us to probe the potential of LLMs in the realm of recommendation systems.\n\n# B. Multi-Modal Large Language Models\nDespite their versatility and promising performance, LLMs are restricted to textual inputs. However, a vast reservoir of information and knowledge resides in other modalities, including vision, video, and audio. Consequently, researchers have proposed Multi-modal Large Language Models (MLLMs), with the objective of amalgamating the domain of text with other modalities [ 36 ], [ 37 ]. 
Recent MLLMs suggest that visual feature space can be harmoniously aligned with the input textual space [ 21 ], [ 22 ], [ 38 ], [ 39 ], thereby empowering them to perform language generation tasks conditioned on visual inputs. Beyond vision, researchers have incorporated other modalities, such as video [ 40 ] and audio [ 41 ], into LMs, enabling them to digest information and knowledge of other modalities. We draw inspiration from these revelations to devise LLaRA, which fuses multi-modal information to enhance the sequential recommendation.\n\n# C. LLMs for Sequential Recommendation\nSequential recommendation leverages patterns in user behavior data to predict users’ next interaction based on their historical engagement sequence. Prior studies have explored employing complex model architectures to better characterize user preference, including Recurrent Neural Networks (RNNs) [3 ], [ 42 ]–[ 45 ], Convolutional Neural Networks (CNNs) [ 4 ], [46 ], [ 47 ], and Attention mechanisms [ 5 ], [ 48 ]. Additional studies have proposed supplementary learning tasks to improve sequential recommendation performance, including causal inference [ 49 ]–[ 51 ], data augmentation [ 52 ]–[ 54 ], and robust learning [ 55 ].  \n\nWith the advent of LLMs, researchers pay increasing attention to exploring their potential for sequential recommendation. Not only the extensive world knowledge stored in LLMs could serve as a rich source of background information for items, but also the reasoning capabilities of LLMs are able to augment the next item prediction. Previous works on the integration of LLMs into recommendation, often referred to as LLM4Rec, fall into two main categories [ 56 ], [ 57 ]: (1) LLM as the recommender and (2) LLM as the enhancer:  \n\n•LLM as the recommender. It involves training from scratch [ 10 ], tuning [ 11 ]–[ 14 ], prompting [ 15 ], or in context learning (ICL) [ 16 ], [ 17 ] an LLM on recommendation data to serve as a recommender. 
While studies within this category have substantiated that LLMs can be imbued with recommendation capabilities, they forfeit leveraging established yet effective recommendation models, resulting in inferior performance. •LLM as the enhancer. It augments traditional recommenders with LLM tokens or embeddings [ 19 ], [ 28 ], [58 ]. It typically utilizes LLMs as feature extractors or text generators, given their exceptional ability to integrate diverse sources and forms of information, such as item metadata. Nonetheless, the actual recommendation process is still done by conventional models, leaving the LLM’s reasoning skills untouched.  \n\nTo the best of our knowledge, LLaRA is a pioneering work that aligns traditional sequential recommendation models with LLMs. We not only capitalize on the sequential behavioral patterns learned by the well-trained traditional sequential models, but also utilize the reasoning ability and background knowledge inherent in the LLM.\n\n# III. P RELIMINARY\nTask Formulation. Sequential recommendation aims to predict the next item that aligns with a user’s interests, based on his/her historical interaction sequence [ 1 ], [ 2 ]. Formally, given a user who has chronologically engaged with item sequence $[i_{1},i_{2},...,i_{n}]$ , a sequential recommender entails predicting the next item $i_{n+1}$ this user will interact with.  \n\nCurriculum Learning. Curriculum learning [ 24 ] draws inspiration from the pedagogical strategies employed in human education, emphasizing training the model from simple to more challenging learning tasks. In general, curriculum learning contains three key stages [ 25 ]:  \n\n1) Complexity Assessment: Curriculum learning initially quantifies the complexity of each data point or task, which is then used to assign a learning priority. 
This step is crucial as it determines the order of tasks used for model training, and simpler tasks are typically accomplished before more challenging tasks are engaged in the learning process.   \n2) Scheduler Formulation: Based on the complexity assessment, a training scheduler can be devised to dictate the sequence and frequency of tasks that the model will be exposed to during the learning process.   \n3) Training Execution: Having devised the training scheduler, we can implement the curriculum learning process adhering to the predetermined progression.  \n\nInstruction Tuning. To enhance the capability of LLMs to better understand and respond to instructions for specific tasks, instruction tuning emerges as a pivotal approach that can substantially boost LLMs to follow human instructions. In technique, instruction tuning first requires structuring the data into the format of $(x,y)$ , where $x$ and $y$ denote the textual description of human instructions and the corresponding responses respectively. This input-output format not only encapsulates the task descriptions but also necessitates the transformation of training data into natural language, thus creating a comprehensive and coherent instructional context. Subsequently, we can tune the LLMs with $(x,y)$ following the autoregressive objective [ 6 ], [ 7 ], [ 29 ]:  \n\nDespite this reduction, it still manages to maintain acceptable performance levels. This approach effectively reduces both computational demands and processing time, making it a more efficient alternative for fine-tuning LLMs.  \n\nLoRA [ 63 ] is a typical PEFT algorithm, freezing the pretrained weights and injecting new trainable parameters. 
The optimizing objective of LoRA can be formulated as follows:  \n\n$$\n\\underset{\\Theta}{\\operatorname*{max}}\\sum_{(x,y)\\in\\mathcal{Z}}\\sum_{t=1}^{|y|}\\log\\left(P_{\\Phi_{0}+\\Delta\\Phi(\\Theta)}(y_{t}|x,y_{<t})\\right),\n$$  \n\nwhere LoRA introduces parameters $\\Theta$ , which are smaller in size in comparison to the original LLM parameters $\\Phi_{0}$ , and updates $\\Theta$ instead of $\\Phi$ .',
                          'paper_title': 'LLaRA: Large Language-Recommendation Assistant',
                          'source_name': 'SIGIR', 'year': '2024', 'chunk_id': 2,
                          'chunk_ext_id': 454846583944596150}],
                  content="""### Prompt Engineering and Template Design

In this chapter, we will delve into the topic of prompt engineering and template design. As mentioned earlier, the previous chapter introduced the foundational knowledge of LLMs' planning capabilities, including their architecture, pre-training and fine-tuning methods, and evaluation techniques. Building upon this foundation, we now explore how prompt engineering and template design can enhance LLMs' planning abilities.

Prompt engineering and template design are crucial for guiding LLMs to generate effective plans, especially in few-shot or zero-shot learning scenarios. These techniques involve crafting prompts and templates that provide LLMs with context, examples, and instructions to facilitate better understanding and generation of plans. According to the literature, prompt design significantly impacts LLM performance, with differences in accuracy up to 76 points observed when evaluated using LLaMA-2-13B<sup>53</sup>. Furthermore, sensitivity to prompt formatting persists even with increased model size, additional few-shot examples, or instruction tuning.

One effective approach to prompt engineering is the use of in-context learning, which involves providing a small number of examples to prompt an LLM without requiring fine-tuning for different tasks. This technique has been shown to be critical in various LLM applications, including embodied agents and visual navigation tasks<sup>50</sup>. For instance, LLM-Planner utilizes a structured prompt design consisting of six parts: task introduction, goal instruction, step-by-step instruction, plan list, object list, and retrieval message<sup>48</sup>. This design enables LLM-Planner to generate complete and high-quality high-level plans grounded in the current environment with a fraction of labeled data.

Another important aspect of prompt engineering is the design of program-like prompts. For example, PROGPROMPT proposes a program-like prompt for LLMs to generate executable plans for robotic agents<sup>50</sup>. However, this approach assumes ground-truth objects and affordances, limiting the number of in-context examples. To address this limitation, LLM-Planner grounds the LLM to the current environment using only a pre-trained vision model, directly predicting high-level plans without relying on a list of admissible actions. This approach allows for dynamic adaptation of high-level plans during task execution, achieving competitive end-to-end performance on the ALFRED benchmark with long-horizon tasks.

In summary, prompt engineering and template design play a vital role in enhancing LLMs' planning capabilities. By providing context, examples, and instructions through carefully crafted prompts and templates, LLMs can generate more effective plans, especially in few-shot or zero-shot learning scenarios. As we continue to explore and refine these techniques, we can further unlock the potential of LLMs in various planning tasks. In the next chapter, we will discuss another method for improving LLMs' planning abilities: knowledge distillation.""")
    # NOTE(review): appears to be a hard-coded sample/fixture Section (subsection
    # "3_2" of a survey outline) — title/description are Chinese survey-outline
    # text while `content` is the pre-generated English chapter draft; confirm
    # whether this is test data or meant for production.
    # NOTE(review): `reference` is passed as the empty string '' here, whereas
    # the sibling Sections in this function pass a list of chunk dicts (or a
    # list for `reference=[...]`) — verify Section accepts both types.
    s11 = Section(section_identifier="3_2", title="基于知识蒸馏的方法",
                  description='介绍了基于知识蒸馏的方法，包括知识蒸馏的原理和类型、基于距离度量的知识蒸馏，以及基于能量模型的知识蒸馏。',
                  reference='',
                  content="""Knowledge Distillation refers to the process of transferring knowledge from a larger, more complex teacher model to a smaller, more efficient student model. This technique has gained significant attention in the field of artificial intelligence, particularly in natural language processing, computer vision, and information retrieval. The basic concept involves training the student model using valuable information from the output of the teacher model, thereby improving the student model's performance and enabling it to perform complex tasks with improved efficiency.

In the field of knowledge distillation, various methods and techniques have been proposed to facilitate the transfer of knowledge from the teacher model to the student model. These methods can be categorized into different types based on the approach used. One common approach is logits-based knowledge distillation, where the student model mimics the logits of the teacher model by minimizing the KL-divergence of the class distribution. This method has been widely used in various domains, including information retrieval, where the teacher model is often a complex reranker with higher capacity but lower efficiency compared to the efficient dual-encoder based student model.

Another approach is feature-based knowledge distillation, which utilizes the output of intermediate layers (feature maps) as knowledge to supervise the student model's training. This method allows the student model to learn rich representations from the teacher model, enabling it to capture intricate patterns and relationships in the data. Feature-based knowledge distillation has been successfully applied in tasks such as image classification and object detection in computer vision.

Relation-based knowledge distillation, on the other hand, focuses on distilling the relation between samples rather than individual instances. This approach is particularly useful in tasks where the relationship between samples is crucial, such as knowledge graph completion and recommendation systems. By learning the relationships between samples, the student model can make more accurate predictions and recommendations.

In addition to the different types of knowledge distillation, there are also variations in the training process. Offline distillation methods involve training the teacher model separately and then using its outputs to train the student model. On the other hand, online distillation methods train the teacher and student models simultaneously, allowing for dynamic knowledge transfer and adaptation. Deep mutual learning is an example of online distillation, where multiple models are trained collaboratively to improve generalization ability and computational efficiency.

Self-supervised knowledge distillation introduces knowledge distillation to self-supervised learning, aiming to enhance the performance of small models. This approach leverages unlabeled data to learn rich representations and transfer knowledge from a pre-trained teacher model to a student model. Methods like CRD, SimCLR-v2, and SSKD have explored different ways to combine self-supervised learning with knowledge distillation, achieving promising results.

In summary, knowledge distillation is a powerful technique for transferring knowledge from large language models to smaller models, enabling them to perform complex tasks with improved performance. The different types of knowledge distillation, including logits-based, feature-based, and relation-based, offer flexibility in transferring different aspects of knowledge. Furthermore, variations in the training process, such as offline and online distillation, provide options for dynamic knowledge transfer and adaptation. In the next chapter, we will explore the application of knowledge distillation in enhancing the planning capabilities of large language models.""")
    s12 = Section(section_identifier="3_2_1", title="知识蒸馏的原理和类型",
                  description='阐述了知识蒸馏的原理和类型，例如基于表示匹配和分布匹配的知识蒸馏，以及它们在LLMs规划能力中的应用。',
                  reference=[
                      {
                          'chunk_content': '# 2 RELATED WORK\nLarge Language Models (LLMs) [5 ,13 ,33 ,49 ,50 ,56 ] have achieved great success in various natural language processing tasks, e.g. , topic classification [ 27 ,38 ], sentiment classification [ 5 ,38 ], translation [ 5 ], by few-shot prompting (or in-context learning) [ 5 ,7 ,38 ]. Recently, Wang et al. [61 ], Wei et al. [62 ]show that LLMs with more than 100B parameters ( e.g. , GPT-3 [ 5 ] with 175B, PaLM with 540B [ 9 ]) can solve complex tasks by generating multiple reasoning steps towards the answer when given a few reasoning examples as demonstration. While both GPT-3.5 [ 42 ] and GPT-4 [44 ] have shown promising reasoning ability for complex mathematical tasks like MATH [ 19 ], the performance of open-source models ( e.g. , LLaMA-1 [ 56 ], LLaMA-2 [ 57 ]) is far from satisfactory.  \n\nLearning Mathematical Reasoning for complex math tasks like GSM8K [ 10 ] and MATH [ 19 ] is one of the most challenging problem in open-source LLMs. Prompt-based methods aim to activate the potential capacities of LLMs by providing suitable prompting inputs without the need to perform model finetuning. Wei et al. [62 ]enhances the reasoning ability of LLMs by augmenting the output with a sequence of intermediate steps towards the answer. A few methods [ 16 ,61 ,68 ] are proposed to improve the quality of reasoning paths. For example, Complexity-based CoT [ 16 ] selects examples with more steps as in-context demonstrations and shows that prompting with more reasoning steps leads to better performance. Self-Consistency [ 61 ] samples multiple reasoning paths and selects the final answer by majority voting. Another category of works is finetuning-based methods, which fine-tunes open-source models ( e.g. , LLaMA) with the knowledge from some advanced closed-source LLMs [ 42 ,44 ]. Magister et al. [36 ]investigates the transfer of reasoning capabilities via knowledge distillation. Yuan et al. 
[64 ]proposes to apply rejection sampling finetuning (RFT) to improve mathematical reasoning performance. WizardMath [ 34 ] proposes a reinforced evol-instruct method to enhance the reasoning abilities by supervised finetuning and PPO training [ 51 ]. Wang et al. [59 ]propose a constraint alignment loss to fine-tune LLMs for calibration.  \n\nKnowledge Distillation [17 ,20 ] transfers knowledge from a larger teacher model to a smaller student model, achieving promising performance in many applications [ 18 ,39 ,46 ,52 ], Recently, [ 15 ,21 –23 ,30 ,36 ,53 ] propose to transfer reasoning abilities from LLMs ( e.g. , GPT-3.5 [ 42 ], PaLM [ 9 ]) to small language models ( e.g. , T5 [ 50 ], GPT-2 [ 49 ]). For example, Finetune-CoT [ 21 ] samples multiple reasoning paths from LLMs and finetune student models with correct ones, while Self-Improve [ 23 ]chooses the one with the highest confidence. Li et al. [30 ]further feeds the question and ground-truth label to LLMs for prompting its reasoning path. Shridhar et al. [53 ]proposes to generate sub-questions and solution pairs for training. Small models finetuned by knowledge distillation can achieve a similar performance to LLMs [ 21 ,36 ] on both common sense reasoning ( e.g. , CommonSenseQA [ 54 ]) and symbol reasoning ( e.g. , Coin Flip [ 62 ]). However, for solving challenging mathematical problems (e.g. , GSM8K [ 10 ]), there is still a large performance gap [ 15 ,21 ,36 ].\n\n# 3 METHOD\nThe overview of our method is illustrated in Figure 1 . Given a meta-question (a sample in the original mathematical training set), we can generate a series of variants. Specifically, we perform three types  \n\nof question bootstrapping. Combined with answer augmentation, we present MetaMathQA, a diverse and high-quality mathematical dataset based on GSM8K and MATH. 
We then present MetaMath, a familty of LLMs fine-tuned on MetaMathQA focusing on elementary mathematical problem-solving.\n\n# 3.1 A NSWER A UGMENTATION (A NS A UG )\nGenerating more reasoning paths is a simple but effective way to augment the training set. For a question $q_{i}$ , we use few-shot chain-of-thought prompting with temperature sampling 1 to generate $K_{\\mathrm{AugAng}}$ more reasoning paths $\\{(r_{i}^{(j)},a_{i}^{(j)}):j=1,\\ldots,K_{\\mathrm{AugAng}}\\}$ }: the question is appended to a few in-context reasoning examples, then fed to the LLM for generating its reasoning path $r_{i}^{(j)}$ and answer $a_{i}^{(j)}$ . We filter out reasoning paths with correct answers as:  \n\n$$\n\\mathcal{D}_{\\mathrm{AnsAug}}=\\{(q_{i},r_{i}^{(j)},a_{i}^{(j)}):a_{i}^{(j)}=a_{i}^{\\star};i=1,\\ldots,N_{q};j=1,\\ldots,K_{\\mathrm{AugAng}}\\}.\n$$\n\n# 3.2 QUESTION BOOTSTRAPPING BY LLM R EPHRASING\nGenerating more answers for mathematical questions with LLMs is straightforward, but creating questions is more challenging. The questions in GSM8K and MATH are written by well-educated teachers. Hence, enlarging the question set through manual creation is time-consuming and laborintensive. To address this issue, we propose rephrasing prompting to generate more questions through the LLM. Example 3.1 (see Example in Appendix A.1 for the complete prompt) shows a few-shot prompt used in GSM8K. Specifically, for a question $q_{i}$ , we append it to the prompt, which is then fed to the LLM for generating the rephrased question. We adopt temperature sampling to sample $K_{1}$ rephrase rephrased questions for each meta-question. For the rephrased questions, it is time-consuming to manually check the consistency compared with the original questions. We propose a supervised method to evaluate the correctness between the rephrased questions and the meta-questions. 
For each rephrased question $\\hat{q}_{i}^{(j)}$ , we use few-shot Chain-of-Thought prompting to generate its reasoning path $\\hat{r}_{i}^{(j)}$ and answer $\\hat{a}_{i}^{(j)}$ , which is compared with the ground-truth answer $a_{i}^{\\star}$ . The accuracy of Complexity-based CoT [ 16 ] for answering the rephrased question by GPT-3.5-Turbo is $76.30\\%$ ,which is comparable to that of answering the original training questions $(80.74\\%)$ . This suggests that the quality of rephrased questions is preserved high while the question diversity is improved. We collect the rephrased questions with correct answers ( i.e. ,$\\hat{a}_{i}^{(j)}=\\bar{a}_{i}^{\\star})$ ) as the augmented data:  \n\n$$\n\\mathcal{D}_{\\mathrm{rephrase}}=\\{(\\hat{q}_{i},\\hat{r}_{i}^{(j)},\\hat{a}_{i}^{(j)}):\\hat{a}_{i}^{(j)}=a_{i}^{\\star};i=1,\\ldots,N_{q};j=1,\\ldots,K_{\\mathrm{rephrase}}\\}.\n$$',
                          'paper_title': 'MetaMath: Bootstrap Your Own Mathematical Questions for Large Language Models',
                          'source_name': 'ICLR', 'year': '2024', 'chunk_id': 2,
                          'chunk_ext_id': 454845858577682200},
                      {
                          'chunk_content': '# 5. Conclusion\nIn this paper, we propose multi-level logit distillation, a novel approach to make better utilization of logit outputs for knowledge distillation. Concretely, we introduce multilevel alignment, which consists of instance-level, batchlevel, and class-level alignment. A prediction augmentation mechanism is proposed to boost the performance. Extensive experiment results prove the effectiveness of our method.  \n\nAcknowledgements. This project is funded in part by Shanghai AI Laboratory, CUHK Interdisciplinary AI Research Institute, the Centre for Perceptual and Interactive Intelligence (CPIl) Ltd under the Innovation and Technology Commission (ITC)’s InnoHK, and Hong Kong RGC Theme-based Research Scheme 2020/21 (No. T41-603/20- R).',
                          'paper_title': 'Multi-Level Logit Distillation', 'source_name': 'CVPR', 'year': '2023',
                          'chunk_id': 5, 'chunk_ext_id': 454847624503756824},
                      {
                          'chunk_content': '# 2.3 Knowledge Distillation\nProposed by Hinton et al . [23] , knowledge distillation is a method to train a model, called the student, using valuable information provided by the output of another model, called the teacher. This way, the teacher model’s knowledge can be transferred into the student model. The idea of knowledge distillation is wildly used in the field of computer vision [ 36 ,72 ,80 ], natural language processing [ 53 ,57 ]and information retrieval [24, 37, 42, 59, 81].  \n\nIn the field of information retrieval, it is common for the teacher model to be a complex reranker model with higher capacity but lower efficiency compared to the efficient dual-encoder based student model. Santhanam et al. [59] apply the KL divergence loss to align query-document scores between teacher and student models. Another approach is balanced topic-aware query sampling [ 24 ], which shows further improvement on top of the original knowledge distillation loss. To address the performance gap, Zeng et al . [81] propose a curriculum learning based knowledge distillation framework that trains a student model with increasing difficulty. In addition to monolingual retrieval, multilingual distillation frameworks have also been proposed. Li et al . [35] explore using query-document scores as the distillation signals. The cross-lingual token alignment task has also been studied as an optimal transport problem, with Huang et al . [26] proposing a distillation framework to build a CLIR model via bitext data.  \n\nOur model training framework is also an extension of knowledge distillation. A typical framework for knowledge distillation relies on a teacher model to solely provide target distributions [ 21 ,44 ]. Our approach has different sources of knowledge: the major knowledge is from the teacher model, and we also consider the cross-lingual knowledge shared by the prompt matrix. 
Moreover, from the language perspective, rather than focusing on one CLIR task, our model simultaneously learns retrieval knowledge for multiple CLIR tasks.\n\n# 3 METHODOLOGY\nOur goal is to incorporate the knowledge of query-document matching from a well-trained monolingual retrieval model into a multilingual transformer-based retrieval architecture, such that it is capable of generating contextual representations under the MLIR setting and thus performing query-document matching in different languages. In this section, we first define the MLIR task and outline our approach. Then we present the key component of our model: a soft prompt-based encoder-decoder architecture. Finally, we introduce the model training via a knowledge distillation framework and build the MLIR model with components from both the teacher and student models. Due to space limitations, we focus on the MLIR case of searching a multilingual collection with an English query as an example to describe our method. It is worth noting that English may also be included in the multiple collection.\n\n# 3.1 Overview\nGiven a query $q$ in language $X$ and a target collection ${\\mathcal{D}}_{Y}$ which contains documents in language set $Y=\\{Y_{1},Y_{2},\\dots Y_{K}\\}$ , suppose $d_{k i}$ —the $i^{\\mathrm{th}}$ document in language $Y_{k}$ —has the ground truth relevance label $R e l(q,d_{k i})$ , then the aim is to design an MLIR model $f$ that retrieves a list of documents from ${\\mathcal{D}}_{Y}$ such that  \n\n$$\nf(q,d_{k i})\\ge f(q,d_{l j}),\\quad\\forall\\:R e l(q,d_{k i})\\ge R e l(q,d_{l j})\n$$  \n\nwhere $f(\\cdot,\\cdot)$ indicates the ranking score calculated by the model. To build model $f$ , we first assume there exists an oracle model $g$ for the retrieval task in language $X$ . 
Thus, given $q$ and monolingual collection ${\\mathcal{D}}_{X}$ ,$g$ satisfies:  \n\n$$\ng(q,d_{x i})\\ge g(q,d_{x j}),\\quad\\forall\\,R e l(q,d_{x i})\\ge R e l(q,d_{x j})\n$$  \n\nWe ca achieve (1) with model $f^{\\prime}$ if for any $d_{*}$ in $Y$ and its translation $d_{x}$ in 𝑋, the model matches the oracle:  \n\n$$\nf^{\\prime}(q,d_{*})=g(q,d_{x})\n$$  \n\nSuppose both $f^{\\prime}$ and $g$ follow the architecture of dense retrieval, the ranking score calculation is the dot-product of the query and document embeddings, thus:  \n\n$$\nf_{E}^{\\prime}(q)f_{D}^{\\prime}(d_{*})^{\\top}=g_{E}(q)g_{D}(d_{x})^{\\top}\n$$  \n\nwhere encoders for $f_{E}^{\\prime}$ and $f^{\\prime}$ $g_{E}$ and are query encoders; $g$ respectively. We then reuse $f_{D}^{\\prime}$ and $g_{D}$ $g_{E}$ are document as the query encoder of $f^{\\prime}$ . With $f_{E}^{\\prime}=g_{E}$ , we have:  \n\n$$\ng_{E}(q)\\big(f_{D}^{\\prime}(d_{*})-g_{D}(d_{x})\\big)^{\\top}=0\n$$  \n\nIt is safe to assume $g_{E}(q)$ is a nonzero vector. Therefore the goal of finding $f^{\\prime}$ is equivalent to reducing the embedding distance between parallel documents. In our method, we retrain $g_{D}$ as the teacher model by removing its parameters from the computational graph and train $f_{D}^{\\prime}$ as the student model. Note that in practice, the oracle model $g$ does not exist. We can use an off-the-shelf Englishto-English (monolingual) dense retrieval model as a substitute for $g$ .Because $g_{D}$ is fixed, the essence of knowledge distillation training is to push multilingual document representations generated by $f_{D}^{\\prime}$ toward their corresponding English document representations generated by $g_{D}$ . Moreover, Equation (3) suggests that the training of $f_{D}^{\\prime}$ does not rely on either query $q$ or ground truth relevant judgment. A group of parallel or comparable sentences from English to any other language involved in the collection is adequate to train $f_{D}^{\\prime}$ . 
Parallel or comparable sentences between two languages are often referred as bitext data. Unlike multilingual retrieval data, which often require relevance labels, bitext data are easier to acquire, especially for low-resource languages [22, 63].  \n\n  \nFigure 2: SPD model architecture.',
                          'paper_title': 'Soft Prompt Decoding for Multilingual Dense Retrieval',
                          'source_name': 'SIGIR', 'year': '2023', 'chunk_id': 3,
                          'chunk_ext_id': 454845492757791766},
                      {
                          'chunk_content': '# 2.2 Knowledge Distillation\nHinton et al. (2015 ) first introduced knowledge distillation to transfer knowledge to a small model, and it has been widely used for transferring dark knowledge (which refers to information that can tell us how the model tends to generalize) and model compression in Natural Language Processing and Computer Vision. A series of follow-up works achieved gains on multilingual tasks. Sun et al. (2020 ) enhanced the generalization ability of unsupervised neural machine translation by adding self-knowledge distillation and language branch knowledge distillation. Wang et al. (2020 ) reduced the distance between monolingual teachers and the multilingual student to predict multilingual label sequences. To the best of our knowledge, Reimers and Gurevych (2020 ) is the only multilingual language model related work that applied a student model to mimic sentence representations generated from the teacher model. They fed both source and target sentences into the student model to calculate Mean Square Error (MSE) loss with the teacher model’s source sentences.\n\n# 3 Methodology\nThis section presents the training procedure and introduces our four proposed training objectives. Our goal is to improve multilingual language models by transferring semantic knowledge from English and aligning multi-level information in parallel corpora with limited resources. The general network architecture is illustrated in Figure 1 . The student network consists of three components: an encoder, a projector, and a predictor, while the teacher network contains an encoder and a projector.  \n\n  \nFigure 1: Model Architecture of our proposed Multi-level Multilingual Knowledge Distillation method which combines TLM, XWCL, SentA, and StrucA objectives and is trained in a multi-task manner.\n\n# 3.1 Translation Language Modeling\nTranslation Language Modeling (TLM) objective is an extension of MLM ( Lample and Conneau ,2019 ). 
Given the concatenation of parallel sentences, TLM objective predicts masks in both source and target sequences. In this way, TLM utilizes context information in the corresponding language, and thus helps the model to learn token-level alignments.  \n\nSimilar to Devlin et al. (2018 ), we randomly mask $15\\%$ tokens from input sequences and replace them with a [MASK] token $80\\%$ of the time, with a random token in vocabulary $10\\%$ of the time, and keep them unchanged $10\\%$ of the time. The input sequence is denoted as $[s_{1},\\ldots,s_{a}$ ,[SEP] ,$t_{1},\\ldots,t_{b}]$ , where a, b are numbers of tokens, and masks exist in both source and target sides. Since the teacher model only involves English, we train TLM objective on the student model.\n\n# 3.2 Cross-lingual Word-aware Contrastive Learning\nInspired by Su et al. (2021 ), we propose a crosslingual version of word-aware contrastive learning (XWCL) objective. The goal of XWCL is to encourage the student model to learn more discriminative representations. Different from Su et al. (2021 ), our student model produces representations according to the parallel context instead of surrounding monolingual words. Moreover, due to the vocabulary difference in our teacher and student models, we align the representations on the word-level.  \n\nGiven an English source sequence $\\begin{array}{r l}{s}&{{}=}\\end{array}$ $\\left[s_{1},\\ldots,s_{n}\\right]$ and a target sequence $t=[t_{1},\\dots,t_{m}]$ ,we concatenate them with a special token [SEP] and randomly mask $15\\%$ words only from source sequence $s$ following the same mask strategy in Devlin et al. (2018 ). Then, we feed this masked sequence into the student model and get representation $\\tilde{h}=[\\tilde{h_{1}},\\,.\\,.\\,.\\,,\\tilde{h}_{n+m}]$ . Meanwhile, we input the original sequence $s$ into the teacher model and get $\\boldsymbol{h}=[h_{1},\\cdot\\cdot\\cdot,h_{n}]$ as reference. 
Our proposed XWCL objective learns to minimize the infoNCE loss of the masked tokens:  \n\n$$\n\\mathcal{L}_{\\mathrm{XWCL}}=-\\!\\sum_{i=1}^{n}\\log m\\left(\\boldsymbol{s}_{i}\\right)\\!\\frac{\\exp\\!\\left(\\sin\\!\\left(\\tilde{h}_{i},\\!h_{i}\\right)/\\tau\\right)}{\\sum_{j=1}^{n}\\exp\\!\\left(\\sin\\!\\left(\\tilde{h}_{i},\\!h_{j}\\right)/\\tau\\right)},\n$$  \n\nwhere $\\tau$ is a temperature parameter, $\\mathrm{sim}(\\cdot,\\cdot)$ denotes dot product, $m\\left(s_{i}\\right)\\,=\\,1$ if $s_{i}$ is a masked token, otherwise $m\\left(s_{i}\\right)=0$ . Here we mask the whole word and treat the first token of each mask as the word representation. Consequently, XWCL will make masked representations produced by the student model closer to their corresponding representations in English vector space, and discriminate them from other distinct representations.\n\n# 3.3 Sentence Alignment\nBERT is well-trained with a large-scale English corpus and thus encodes rich semantic knowledge. The goal of our proposed Sentence Alignment (SentA) objective is to capture this semantic information and transfer it to mBERT. Similar to Grill et al. (2020 ), we learn representations by instance-level discrimination without negative samples, while we freeze the teacher model rather than updating with an exponential moving average.  \n\nGiven a sentence pair $(s^{(i)},t^{(i)})$ in parallel corpora, where $\\mathbf{\\boldsymbol{s}}^{(i)}$ is the i-th sentence from English and $t^{(i)}$ is from a target language, we treat them as two different views and input $\\boldsymbol{s}^{(i)}$ into the teacher network and $t^{(i)}$ into the student network separately. 
We minimize Mean Squared Error loss between teacher projections and student predictions:  \n\n$$\n\\begin{array}{r}{\\mathcal{L}_{\\mathrm{SentA}}=\\frac{1}{|\\mathcal{B}|}\\sum_{i\\in\\mathcal{B}}\\left(\\bar{q}_{\\theta}\\left(g_{\\theta}\\left(\\tilde{c}^{(i)}\\right)\\right)-\\bar{g}_{\\xi}\\left(c^{(i)}\\right)\\right)^{2},}\\end{array}\n$$  \n\nwhere $\\boldsymbol{c}^{(i)}$ and $\\tilde{c}^{(i)}$ are the [CLS] tokens of last hidden states of i-th sentence in the teacher and student encoders, $g$ defines the projectors with distinct parameters and $q$ defines the predictor, and $\\bar{g}$ ,$\\bar{q}$ indicate that they are normalized with $L_{2}$ norm. More precisely, we apply 2-layers MLPs to implement projectors and the predictor.  \n\nSentA objective will force different languages closer to semantically similar English sentences in the vector space. Meanwhile, the student network can adopt well-trained English vector space properties by imitating corresponding representations in the teacher network.',
                          'paper_title': 'Multi-level Distillation of Semantic Knowledge for Pre-training Multilingual Language Model.',
                          'source_name': 'EMNLP', 'year': '2022', 'chunk_id': 1,
                          'chunk_ext_id': 454919286708522392},
                      {
                          'chunk_content': '# 3 A LGORITHM\nIn this study, we explain the success of knowledge distillation from a new perspective, i.e. quantifying knowledge points encoded in the intermediate layers of a DNN for classification. In contrast to previous explanations of knowledge distillation, our method enables people to explain the representation power of a DNN using the quantity and the quality of knowledge points encoded in a DNN, which reflects a more direct and stronger connection between the knowledge and performance. To this end, we measure the information in each input unit discarded by the DNN to define knowledge points, based on the information theory. We propose three hypotheses on the mechanism of knowledge distillation to explain its success. To verify these hypotheses, we further design three types of metrics based on the quantification of knowledge points, which evaluate the representation quality of the DNN. Note that theoretically, knowledge distillation is generally considered to involve two fully different utilities. Distillation from highdimensional intermediate-layer features usually exhibits the first type of utility, i.e., forcing the student network to mimic knowledge points of the teacher network. In contrast, distilling from relatively lowdimensional network output often exhibits the second type of utility, i.e., selecting confident samples for learning and ignoring unconfident samples. In this paper, we mainly explain the first utility of knowledge distillation, which is mainly exhibited by distilling from high-dimensional intermediate-layer features.',
                          'paper_title': 'Quantifying the Knowledge in a DNN to Explain Knowledge Distillation for Classification',
                          'source_name': 'IEEE Transactions on Pattern Analysis and Machine Intelligence',
                          'year': None, 'chunk_id': 3, 'chunk_ext_id': 454845524636079026},
                      {
                          'chunk_content': '# Multi-Granularity Structural Knowledge Distillation for Language Model Compression\nChang $\\mathbf{Liu}^{1,2}$ , Chongyang Tao 3 , Jiazhan Feng 1 , Dongyan Zhao ∗1 Wangxuan Institute of Computer Technology, Peking University 2 Center for Data Science, Peking University 3 Microsoft Corporation 4 Artificial Intelligence Institute of Peking University 5 State Key Laboratory of Media Convergence Production Technology and Systems {liuchang97,fengjiazhan,zhaody}@pku.edu.cn ,\n\n# Abstract\nTransferring the knowledge to a small model through distillation has raised great interest in recent years. Prevailing methods transfer the knowledge derived from mono-granularity language units (e.g., token-level or sample-level), which is not enough to represent the rich semantics of a text and may lose some vital knowledge. Besides, these methods form the knowledge as individual representations or their simple dependencies, neglecting abundant structural relations among intermediate representations. To overcome the problems, we present a novel knowledge distillation framework that gathers intermediate representations from multiple semantic granularities (e.g., tokens, spans and samples) and forms the knowledge as more sophisticated structural relations specified as the pair-wise interactions and the triplet-wise geometric angles based on multi-granularity representations. Moreover, we propose distilling the well-organized multi-granularity structural knowledge to the student hierarchically across layers. Experimental results on GLUE benchmark demonstrate that our method outperforms advanced distillation methods.  \n\ninto resource-scarce scenarios, e.g., mobile phones and embedded devices.  \n\nVarious attempts have been made to compress the huge PLMs into small ones with minimum performance degradation. As one of the main approaches, knowledge distillation ( Hinton et al. 
,2015 ) utilizes a large and powerful teacher model to transfer the knowledge to a small student model. Based on the teacher-student framework, Jiao et al. (2020 ); Wang et al. (2020 ) distilled the token-level representations and attention dependencies to the student, Sanh et al. (2019 ); Sun et al. (2019 ) taught the student to mimic the output logits of the teacher, Sun et al. (2020 ) enforced the student’s representation to be closed to the teacher’s while pushing negative samples to be far apart. Although proved effective, existing approaches have some flaws. For one thing, these distillation methods only adopted the representations of mono-granularity language units (i.e., token-level or sample-level), while neglecting other granularity. For another, their distillation objectives either matched the corresponding representations between the teacher and the student or aligned the attention dependencies, failing to capture more sophisticated structural relations between the representations.\n\n# 1 Introduction\nRecent years have witnessed a surge of pre-trained language models ( Devlin et al. ,2019 ;Lewis et al. ,2020 ;Clark et al. ,2020 ;Brown et al. ,2020 ). Building upon the transformer architecture ( Vaswani et al. ,2017 ) and pre-trained on large-scale corpora using self-supervised objectives, these PLMs have achieved remarkable success in a wide range of natural language understanding and generation tasks. Despite their high performance, these PLMs usually suffer from high computation and memory costs, which hinders them from being deployed  \n\nTo address these issues, in this paper we propose a novel knowledge distillation framework named MultiGranularity Structural Knowledge Distillation (MGSKD) through answering the three research questions: (1) which granularity should the knowledge be, (2) what form of knowledge is effective to transfer and (3) how to teach the student using the knowledge. 
For the “ which ” question, given that natural languages have multiple semantic granularities, we consider the intermediate representations in three granularities: tokens, spans and samples. Specifically, we first take the sub-word tokens as the smallest granularity, then select phrases and whole words as spans for they hold complete meanings, and finally treat the whole input texts as samples. We use mean-pooling to obtain the representations of spans and samples based on token representations. For the “ what ”question, we propose to leverage the sophisticated structural relations between the representations as the knowledge. Concretely, instead of aligning the corresponding representations of the teacher and the student, we propose to form the knowledge as the pair-wise interactions and the triplet-wise geometric angels of a group of representations. For the “how ” question, following the recent findings that the bottom layers capture syntactic features while the upper layers encode semantic features ( Jawahar et al. ,2019 ), we conduct hierarchical distillation where the bottom layers of the student are taught token-level and span-level knowledge while the upper layers learn sample-level knowledge.  \n\nWe conduct comprehensive experiments on standard language understanding benchmark GLUE ( Wang et al. ,2018 ). Experimental results demonstrate that our knowledge distillation framework outperforms strong baselines methods. Surprisingly, MGSKD achieves comparable or better performance than $\\mathrm{BERT_{base}}$ on most of the tasks on GLUE, while keeping much smaller and faster. Our contributions in this paper are three folds:  \n\nmantic representations in language (i.e., the repre•We are the first to leverage multi-granularity sesentations of tokens, spans and samples) for knowledge distillation.  
\n\ncated structural relations specified as the pair-wise •We propose to form the knowledge as sophistiinteractions and the triplet-wise geometric angles based on multi-granularity representations.  \n\nGLUE benchmark and MGSKD achieves superior •We conduct comprehensive experiments on results over other knowledge distillation baselines.',
                          'paper_title': 'Multi-Granularity Structural Knowledge Distillation for Language Model Compression',
                          'source_name': 'ACL', 'year': '2022', 'chunk_id': 0,
                          'chunk_ext_id': 454898870263482564},
                      {
                          'chunk_content': '# 4 Related Work\nKnowledge Localization Existing methods roughly fall into two categories: (1) Gradient-based method: Dai et al.(2022) first introduces the concept of knowledge neurons and localizes them by assessing the contribution of each neuron (Geva et al. 2021) through calculating their attribution scores using integrated gradients. (2) Causal-inspired method, introduced by Meng et al.(2022a), defines knowledge neurons as the neuron activations within PLMs that have the strongest causal effect on predicting certain factual knowledge, and this method has inspired the creation of knowledge editing algorithms such as ROME (Meng et al. 2022a), MEMIT (Meng et al. 2022b), and MEND (Mitchell et al. 2022). However, current methods lack a universal approach for different PLM architectures and exploration in multiple languages.  \n\nAxiomatic Attribution Methods Sundararajan, Taly, and Yan(2017) introduces the axiomatic attribution method, emphasizing Sensitivity and Implementation Invariance as the core axioms for attribution methods, leading to Integrated Gradients (IG). Subsequent research includes Discretized IG (Sanyal and Ren 2021), which uses interpolation strategies for gradient accuracy; Sequential IG (Enguehard 2023) designed for word importance evaluation; and Effective Shapley value along with Shapley IG, developed by Liu et al.(2022) to enhance efficiency and effectiveness. We improve the baseline vectors for IG to minimize their information content.\n\n# 5 Conclusion\nIn this research, we explore factual knowledge localization in multilingual PLMs using our architecture-adapted multilingual integrated gradient method. We further design two modules, leading to two discoveries of language-independent knowledge neurons and degenerate knowledge neurons. 
The former affirms that a portion of the knowledge in multilingual PLMs exists in a form that transcends language, while the latter introduces a novel type of neuron which is similar to the degeneration phenomenon observed in biological systems, and these neurons can be used to detect incorrect facts.',
                          'paper_title': 'Journey to the Center of the Knowledge Neurons: Discoveries of Language-Independent Knowledge Neurons and Degenerate Knowledge Neurons',
                          'source_name': 'AAAI', 'year': '2024', 'chunk_id': 6,
                          'chunk_ext_id': 454846745259371804},
                      {
                          'chunk_content': '# 2 Methodology\n\n# 2.1 Learning Conceptual Roles from Dictionary\nIn an effort to imitate the human language understanding process, we employed the conceptual role theory, a convincing theory assuming that it is a concept’s role in some greater mental theory that primarily determines its meaning ( Piantadosi and Hill ,2022 ), where the key is the interrelation between concepts ( Deacon ,1998 ;Santoro et al. ,2021 ). For example, the meaning of “water” can be defined by other interlinked concepts like “liquid”, “without smell”, “hydrogen”, and “oxygen”.  \n\nThe recent success of large-size PLM s supports the conceptual role theory ( Piantadosi and Hill ,2022 ). PLM s are the contemporary NLP techniques that exploit the distributional hypothesis as their learning objective, i.e., masked language modelling ( Sinha et al. ,2021a )1 . This hypothesis assumes that semantically analogous words will appear in similar contexts ( Harris ,1954 ). Hence, PLM s learn to define the words’ meaning through other words that co-occur in similar contexts. That is, they capture the interrelationship of concepts frequently appearing in analogous contexts. Although the distributional hypothesis is an efficient assumption allowing self-supervised training, it cannot perfectly capture the interrelation between concepts because the concepts can deliver different meanings even though they often appear in similar contexts. For example, it is difficult to capture semantic antonymy through the distributional hypothesis ( Jang et al. ,2022b ).  \n\nTo this end, we aimed to improve PLM s’ understanding of meaning by making them learn more precise interrelationships. Recent studies revealed that the training data play a critical role in deciding the inductive bias rather than the model’s structure or learning objective ( Furrer et al. ,2020 ;Wang et al. ,2022 ;Jang et al. ,2022a ). 
Therefore, we designed a learning task that provides training instances where a concept and other closely interconnected concepts are presented together to a model having a language modelling objective. To achieve this, we used word-definition pairs from a dictionary, because a definition is a composition of words explaining the target word, and thereby, it has been the most commonly used and very effective tool for vocabulary learning, especially for second and foreign language learners ( Takahashi ,2012 ;Zhang et al. ,2020 ). A target word and its definitions were concatenated as a single text and used as a training instance for language modelling, allowing a model to determine a word’s meaning based on highly related concepts rather than those that appear in similar contexts. For example, when it comes to the words “happy” and “unhappy”, distributional models normally generate high similarity, because they are both emotional expressions and appear in similar contexts frequently 2 . However, our proposed task enables capturing precise interconnection between concepts: “unhappy” with “not happy”, “sad”, and “not pleased”. An example of our training data is provided in Table 6 in Appendix C.  \n\nWe introduced the intermediate training technique ( Phang et al. ,2018 ;Wang et al. ,2019a ;Liu et al. ,2019a ;Pruksachatkun et al. ,2020 ;Vu et al. ,2020 ) that first trains PLM s on an intermediate task and uses it for other downstream tasks. Hence, we retrained PLM s on our new dataset and named it conceptual role model ( CRM ). A leading cause for using the intermediate training is that the number of word-definition pairs was not sufficient enough to train large PLM s from scratch, and their weights can be used as good initial values. 
The CRM has practical advantages in that (1) it can be applied to any PLM trained with language modelling objectives, and (2) it is readily applicable to other languages due to the relative ease of collecting dictionary data.\n\n# 2.2 Training-efficient Parameter Integration\nWe propose a parameter integration method to effectively incorporate the previous knowledge obtained by PLM with enhanced meaning awareness of CRM.\n\n# 2.2.1 Problem Statement\nLet $\\mathbf{W}_{p}\\in\\mathcal{R}^{d\\times l}$ and $\\mathbf{W_{c}}\\in\\mathcal{R}^{d\\times l}$ be the parameter matrices of the PLM and CRM , respectively. Note that the PLM and CRM share the same model architecture, i.e., having a parameter matrix of the same size. We aim to learn $\\mathbf{W}_{n e w}\\in\\mathcal{R}^{d\\times l}$ ., an integrated parameter matrix, from $\\mathsf{W}_{p}$ and $\\mathbf{W}_{c}$ during fine-tuning. Thus, the process can be defined as follows:  \n\n$$\n\\mathbf{W}_{n e w}=\\mathbf{W}_{o}\\left[\\mathbf{W}_{p}\\right],\n$$  \n\nwhere $\\mathbf{W}_{o}\\in\\mathcal{R}^{d\\times2d}$ is a learnable parameter matrix while $\\mathsf{W}_{p}$ and $\\mathbf{W}_{c}$ remain fixed during fine-tuning. composing $\\mathrm{W}_{o}$ into $\\mathbf{W}_{1}\\in\\mathcal{R}^{d\\times d}$ and $\\mathbf{W}_{2}\\in$ $\\mathcal{R}^{d\\times d}$ R, Eq. 1 can be rewritten as follows:  \n\n$$\n\\begin{array}{r}{\\mathbf{W}_{n e w}=\\left[\\mathbf{W}_{1}\\quad\\mathbf{W}_{2}\\right]\\left[\\mathbf{W}_{p}\\right]}\\\\ {\\mathbf{W}_{c}\\mathbf{J}}\\\\ {=\\mathbf{W}_{1}\\mathbf{W}_{p}+\\mathbf{W}_{2}\\mathbf{W}_{c}}\\\\ {=\\mathbf{W}_{p}^{\\prime}+\\mathbf{W}_{c}^{\\prime},\\qquad\\qquad}\\end{array}\n$$  \n\nwhere $\\mathbf{W}_{p}^{\\prime}$ and $\\mathbf{W}_{c}^{\\prime}$ denote the matrices after finetuning. Finally, decomposing $\\mathrm{W^{\\prime}}$ with a fixed ( W)and updated part $(\\Delta\\mathsf{W})$ allows us to reformulate Eq. 
2 as follows:  \n\n$$\n\\begin{array}{r}{\\mathbf{W}_{n e w}=\\left(\\mathbf{W}_{p}+\\Delta\\mathbf{W}_{p}\\right)+\\left(\\mathbf{W}_{c}+\\Delta\\mathbf{W}_{c}\\right)\\quad}\\\\ {=\\mathbf{W}_{p}+\\mathbf{W}_{c}+\\Delta\\mathbf{W}_{t}.\\quad}\\end{array}\n$$  \n\nAs a result, ${\\bf W}_{n e w}$ becomes the addition of fixed ces $\\mathsf{W}_{p}$ ,$\\mathrm{W}_{c}$ , and a learned matrix $\\Delta\\mathbf{W}_{t}\\,\\in$ $\\mathcal{R}^{d\\times l}$ R.  \n\nUpdating the whole parameters of $\\Delta\\mathbf{W}_{t}$ is exactly the same as the fine-tuning with the pretrained weights of $\\mathbf{W}_{p}+\\mathbf{W}_{c}$ , which is impractical particularly for large-sized PLM s. Also, finetuning contains the risk of catastrophic forgetting, as studied in many previous works ( Pruksachatkun et al. ,2020 ;Wallat et al. ,2020 ). To compensate for this, we introduce a low-rank adaptation technique ( Hu et al. ,2022 ) for the parameter integration, which fixes the pre-trained weights and updates only a few number of parameters based on PLM s’ intrinsic dimension ( Aghajanyan et al. ,2021 ). Specifically, we transform $\\Delta\\mathbf{W}_{t}$ to a multiplication of two matrices $\\mathsf{A}\\in\\mathcal{R}^{d\\times r}$ and $\\mathbf{B}\\in\\mathcal{R}^{r\\times l}$ ,where $r\\ll\\operatorname*{min}(d,l)$ :  \n\n$$\n\\begin{array}{r}{{\\bf W}_{n e w}={\\bf W}_{p}+{\\bf W}_{c}+{\\bf A}{\\bf B}.}\\end{array}\n$$  \n\nAs a consequence, the number of trainable parameters is considerably reduced, enabling efficient fine-tuning for large-sized PLM s. Also, compared to other adapter modules ( Houlsby et al. ,2019 ;Pfeiffer et al. ,2021 ) that introduce additional adapter modules between layers, the approach only adds up $A B$ without increasing the number of parameters. As a result, it avoids the use of any additional time or resources during the inference phase, which enables practical inference ( Hu et al. ,2022 ).  
\n\nAggregating $\\mathbf{W}_{p}$ and $\\mathbf{W}_{c}$ .The addition of $W_{p}$ and $W_{c}$ in Eq. 3 causes the amplification of the weight scale, which prevents us from using or searching training hyperparameters based on values used in prior studies. Thus, we used the simple averaging aggregation method ( Wortsman et al. ,2022 ):  \n\n$$\n\\mathbf{W}_{n e w}=\\frac{\\mathbf{W}_{p}+\\mathbf{W}_{c}}{2}+\\mathrm{A^{\\prime}B^{\\prime}}\\,.\n$$  \n\nThe reason behind leveraging the simple averaging technique is to show the efficacy of CRM ,i.e., achieving remarkable improvements in consistency through the most straightforward aggregation method. However, alternative aggregation methods, such as Fisher-Weighted Average ( Matena and Raffel ,2022 ) or RegMean ( Jin et al. ,2023 ), can also be employed.  \n\nAdditional Knowledge Add-up. Note that the aggregation further allows us to integrate the weights of any other models if they share the same structure with the PLM and CRM . Thus, Eq. 5 can be rewritten as follows:  \n\n$$\n\\mathbf{W}_{n e w}=\\frac{1}{|S|+2}\\sum_{i\\in\\{p,c,S\\}}\\mathbf{W}_{i}+\\mathbf{A}^{\\prime}\\mathbf{B}^{\\prime},\n$$  \n\nwhere $S$ is the set of additional PLM s’ weights that are going to be added. Note that, as the parameters are integrated through an addition, no additional training/inference resources are required during fine-tuning, which is computationally beneficial.  \n\nApplying the approach to PLM s. As modern PLM s have a transformer ( Vaswani et al. ,2017 )as a backbone architecture, we followed Hu et al. (2022 ) to apply the low-rank adaptation technique, i.e., limiting its usage to self-attention weights and excluding MLP weights from the scope. We also used the same hyperparameters found by Hu et al. (2022 ), i.e., for a hidden representation $x$ ,$\\Delta\\mathbf{W}_{t}^{\\prime}x$ is scaled by $\\frac{\\alpha}{r}$ , where $\\alpha$ and $r$ are set to 16 and 8, respectively.',
                          'paper_title': 'Improving Language Models Meaning Understanding and Consistency by Learning Conceptual Roles from Dictionary',
                          'source_name': 'EMNLP', 'year': '2023', 'chunk_id': 1,
                          'chunk_ext_id': 454845695768942674},
                      {
                          'chunk_content': '# 6 Conclusion\nIn this work, we propose a new paradigm of injection toward flexible and efficient knowledge injection. In this paradigm, downstream models can be enhanced with little computational cost, which benefits large amounts of models. We first systematically evaluate existing knowledge injection methods and find that they are not suitable for plugand-play injection. Then, we propose map-tuning for this paradigm, which effectively injects knowledge into downstream models to enhance them.  \n\nThere are four promising directions for future investigation into plug-and-play knowledge injection. (1) How can we reduce the performance gap between methods for this novel paradigm and those for the previous injection paradigms, while maintaining superior flexibility and efficiency? (2) Besides factual knowledge, how can we effectively plug diverse knowledge bases, such as text corpora, voice, images, and even other PLMs? (3) After injecting the knowledge in a plug-and-play way, how can the PLMs do various types of complex reasoning based on the injected knowledge ( Onoe et al. ,2023 )? (4) Can the plug-and-play knowledge injection methods for these sources be unified, so we can plug a combination of multiple sources? We hope this work can attract attention to and inspire research on these problems.\n\n# Limitations\nIn this paper, we present a novel knowledge injection paradigm plug-and-play knowledge injection for PLMs. We show existing methods can not be well applied to the new paradigm and propose maptuning as a preliminary exploration of methods.  \n\nThe paradigm plug-and-play knowledge injection has a limitation in terms of its assumption. It assumes that a PLM should be fine-tuned for downstream tasks. However, very large-scale PLMs can perform zero-shot learning or in-context learning on downstream tasks without being fine-tuned. 
Future work may extend the definition of the proposed paradigm to make it meaningful in these scenes.  \n\nThe method map-tuning has three limitations in terms of its applicability. Firstly, we did not evaluate map-tuning for PLMs pre-trained by other language modeling objectives (e.g., casual language modeling) besides MLM. As its spirit can be easily generalized to various language modeling objectives, we leave this evaluation as future work. Secondly, we did not evaluate whether the PLM can do complex reasoning (e.g., multi-hop reasoning) based on the knowledge injected by map-tuning. Thirdly, map-tuning is designed to plug structural fact knowledge. It is also meaningful to plug other diverse knowledge bases, including text corpora, voice, images, and even other PLMs, which are not covered by our work.\n\n# Acknowledgments\nThis work is supported by the National Key R&D Program of China (No.2022ZD0116312), National Natural Science Foundation of China (No. 62236004).  \n\nAuthor Contributions Zhengyan Zhang, Zhiyuan Zeng, Huadong Wang, and Deming Ye wrote the code and conducted the experiments. Zhengyan Zhang constructed the basic experimental framework including codes and datasets. Zhiyuan Zeng was in charge of plug-and-play and fine-tuning experiments. Huadong Wang and Deming Ye provided TransE and PELT embeddings respectively. Zhengyan Zhang and Zhiyuan Zeng contributed to the analysis experiments. Zhengyan Zhang and Zhiyuan Zeng wrote the initial draft. Yankai Lin, Huadong Wang, Chaojun Xiao, Xu Han, and Zhiyuan Liu significantly edited and improved the paper. Peng Li, Maosong Sun, and Jie Zhou provided valuable advice to the research.\n\n\n\n# A Hyper-parameters\n\n# A.1 Fine-tuning downstream PLMs\nWe experiment with four training methods for the adaptation of PLMs on downstream tasks, which are Full-model fine-tuning, LoRA, Adapter, and BitFit. The embedding layer is frozen during training. 
We train all the models using AdamW with $10\\%$ warming-up steps. We list our hyperparameters in Table 5 .\n\n# A.2 General Map-tuning\nFor general map-tuning, we search the dropout rate in {0.15, 0.25, 0.35, 0.45}. We train all the mapping networks using Adam ( Kingma and Ba ,2015 ). The learning rate is 3E-5 and the batch size is 64. We train the mapping network on the Wikipedia corpus for 5 epochs. The hyper-parameters of the best mapping network in all cases are listed in Table 6 . When we evaluate RA on these datasets, we set the sequence length to 512.\n\n# A.3 Task-specifc Map-tuning\nWe report hyper-parameters for task-specific maptuning in Table 7 . We train all mapping networks using Adam with $10\\%$ warming-up steps  \n\nRegarding the results reported in Table 2 , during task-specific map-tuning, we use dropout in the attention probabilities and all fully connected layers of the PLM. The dropout rate is 0.30, 0.20, and 0.00 for Wiki80, Wiki-ET, and EntityQuestions, respectively. Regarding the results reported in Table 3 , when using training data from the source domain for task-specific map-tuning, the dropout rate is 0.35. In these cases, the training data for task-specific map-tuning are identical to those for fine-tuning the downstream models. We search the dropout rate in {0.00, 0.15, 0.20, 0.25, 0.30, 0.35}. When using training data from the target domain for task-specific map-tuning, we do not use dropout.  \n\n<html><body><table><tr><td>Hyper-parameters</td><td>FewRel</td><td>Wiki80</td><td>Wiki-ET</td><td>EntityQuestions</td></tr><tr><td>LearningRate</td><td>2E-5</td><td>4E-4</td><td>5E-5</td><td>2E-3</td></tr><tr><td>BatchSize</td><td>4</td><td>64</td><td>128</td><td>64</td></tr><tr><td>Training Step/Epoch</td><td>3000</td><td>30</td><td>7</td><td>5</td></tr></table></body></html>  \n\nTable 7: Hyper-parameters for task-specific map-tuning.   
\nTable 8: Hyper-parameters for map-tuning on the Wikipedia corpus, after which we fine-tune BERT on downstream tasks with the mapping network plugged.   \n\n\n<html><body><table><tr><td></td><td>FewRel</td><td>Wiki80</td><td>Wiki-ET</td><td>EntityQuestions</td></tr><tr><td>Training Epoch</td><td>5</td><td>2</td><td>2</td><td>4</td></tr></table></body></html>  \n\nThe hyper-parameters for experiments with RoBERTa are identical to those with BERT.',
                          'paper_title': 'Plug-and-Play Knowledge Injection for Pre-trained Language Models.',
                          'source_name': 'ACL', 'year': '2023', 'chunk_id': 6,
                          'chunk_ext_id': 454847874802528418},
                      {
                          'chunk_content': '# 2. Related Works\n\n# 2.1. Knowledge Distillation\nKnowledge distillation [ 24 ] aims to distill knowledge from a larger teacher model to a smaller student model to improve the performance of the student model. Many studies have been proposed in recent years, which can be divided into three groups, i.e., logits-based, feature-based, and relation-based, according to the knowledge types.  \n\nLogits-based [ 24 ,36 ] knowledge distillation utilizes the logits of the teacher model as the knowledge. In the vanilla knowledge distillation [ 24 ], the student model mimics the logits of the teacher model by minimizing the KL-divergence of the class distribution. Feature-based methods [ 7 ,39 ,40 ]utilize the output of intermediate layers, i.e., feature maps, as the knowledge to supervise the training of the student model. Relation-based knowledge distillation [ 38 ,59 ] distills the relation between samples rather than a single instance.  \n\nThese methods mentioned above perform offline distillation. Some studies [ 6 ,19 ,56 ,57 ,60 ] are developed to perform online distillation, i.e., the teacher and the student model are trained simultaneously. Deep mutual learning [ 57 ] is first proposed to train multiple models collaboratively. After that, studies are proposed to improve deep mutual learning regarding generalization ability [ 6 ,19 ] and computation effi- ciency [ 60 ]. All these methods are trained in a supervised manner.\n\n# 2.2. Self-Supervised Knowledge Distillation\nDue to significant improvement for small models, knowledge distillation is introduced to self-supervised learning to improve the performance of small models. CRD [ 44 ]combines a contrastive loss with knowledge distillation to transfer the structural knowledge of the teacher model. 
SimCLR-v2 [ 9 ] proposes to train a larger model via selfsupervised learning first and uses the supervised finetuned large model to distill a smaller model via self-supervised learning. SSKD [ 52 ] combines self-supervised learning with supervised learning to transfer richer knowledge. Compress [ 1 ] and SEED [ 15 ] transfer the knowledge of probability distribution in a self-supervised manner by utilizing the memory bank in MoCo [ 21 ]. SimReg [ 37 ] directly conducts feature distillation by minimizing the squared Euclidean distance between the features of the teacher and student. While ReKD [ 58 ] transfers the relation knowledge to the student. DisCo [ 16 ] proposes to transfer the final embeddings of a self-supervised pre-trained teacher. There is a limitation in these SSL-KD methods, i.e., knowledge is distilled to a student model from a static teacher model in a unidirectional way. The teacher model cannot absorb knowledge from the student model. Recently, DoGo [ 3 ] and MCL [ 53 ] combined MoCo [ 21 ] with mutual learning [ 57 ] for online SSL-KD. However, they either lack a direct comparison with SSLKD methods on mainstream backbones and tasks or can’t guarantee the performance of larger models.\n\n# 3. Methods\nIn this section, we first introduce the overall architecture of MOKD in Sec. 3.1 . Then, the two distillation modes of MOKD, i.e., self-distillation and cross-distillation, are introduced in Sec. 3.2 and Sec. 3.3 , respectively. Finally, the training procedure and implementation details are introduced in Sec. 3.4 .  \n\n  \nFigure 2. The overall architecture of MOKD. In MOKD, two different models (model1 and model2) are trained collaboratively in a self-supervised manner. There are two types of knowledge distillation modes: self-distillation and cross-distillation modes. And the distillation procedure is performed in two feature spaces projected by two types of projection heads, i.e., (a) MLP-Head and (b) T-Head. 
“sg" denotes the stop-gradient operation.\n\n# 3.1. Overall Architecture\nThe overall architecture of MOKD is shown in Fig. 2 .In MOKD, two different models $\\pmb{f}_{i}~(i=1,2)$ are trained collaboratively in a self-supervised manner. There are two knowledge distillation modes: self-distillation and crossdistillation modes. In each model, a multi-layer-perceptron head (MLP-Head) (Fig. 2 (a)) and a Transformer head (THead) (Fig. 2 (b)) are employed to project the feature representations $Z$ produced by the encoders to the output embeddings $\\mathbf{\\nabla}m$ and $\\pmb{t}$ for self-distillation and cross-distillation. Here, the T-Head, which consists of several Transformer blocks, is designed to enhance the semantic alignment between the two models. Self-distillation, which is conducted between each model $\\pmb{f}_{i}$ (as a student) and its EMA version model $\\pmb{f}_{i}^{\\prime}$ (as a teacher), performs self-supervised learning for each model independently. The self-distillation losses are $\\mathcal{L}_{s m i}$ and $\\mathcal{L}_{s t i}$ for the MLP-Head and T-Head, respectively, which will be introduced in Sec. 3.2 . While cross-distillation, which is conducted between the two models, is employed for knowledge interaction between the two models. In crossdistillation, by utilizing the self-attention mechanism of the T-Head, we design a cross-attention feature search strategy to enhance semantic alignment between different models. The cross-distillation losses are $\\mathcal{L}_{c m i}$ and $\\mathcal{L}_{c t i}$ for the MLPHead and T-Head, respectively, which will be introduced in Sec. 3.3 . Here, the subscript $s$ and $c$ stand for self-distillation and cross-distillation, respectively. And the subscript $m$ and $t$ stand for MLP-Head and T-Head, respectively.',
                          'paper_title': 'Multi-Mode Online Knowledge Distillation for Self-Supervised Visual Representation Learning.',
                          'source_name': 'CVPR', 'year': '2023', 'chunk_id': 1,
                          'chunk_ext_id': 454847553378843868}],
                  content="""In this chapter, we will delve into the principles and types of knowledge distillation. As mentioned earlier, we have explored the challenges of large language models (LLMs) in planning and the importance of enhancing their planning capabilities. Knowledge distillation emerges as a promising approach to transfer knowledge from LLMs to smaller models, thereby improving their performance. According to the literature, knowledge distillation has achieved promising performance in various applications, including natural language processing, computer vision, and information retrieval <sup>55</sup><sup>57</sup><sup>58</sup>. Furthermore, recent works have focused on transferring reasoning abilities from LLMs to small language models, enabling them to perform tasks such as common sense reasoning and symbol reasoning <sup>55</sup>. However, despite the progress made, there is still a significant performance gap when it comes to solving challenging mathematical problems <sup>55</sup>.

Knowledge distillation involves training a student model using valuable information from the output of a teacher model, thereby transferring the teacher model's knowledge into the student model <sup>57</sup>. The knowledge transferred can be categorized into different types based on the approach used. One common approach is logits-based knowledge distillation, where the student model mimics the logits of the teacher model by minimizing the KL-divergence of the class distribution <sup>64</sup>. This method has been widely used in various domains, including information retrieval, where the teacher model is often a complex reranker with higher capacity but lower efficiency compared to the efficient dual-encoder based student model <sup>57</sup>.

Another approach is feature-based knowledge distillation, which utilizes the output of intermediate layers (feature maps) as knowledge to supervise the student model's training <sup>64</sup>. This method allows the student model to learn rich representations from the teacher model, enabling it to capture intricate patterns and relationships in the data. Feature-based knowledge distillation has been successfully applied in tasks such as image classification and object detection in computer vision <sup>58</sup>.

Relation-based knowledge distillation, on the other hand, focuses on distilling the relation between samples rather than individual instances <sup>64</sup>. This approach is particularly useful in tasks where the relationship between samples is crucial, such as knowledge graph completion and recommendation systems. By learning the relationships between samples, the student model can make more accurate predictions and recommendations.

In addition to the different types of knowledge distillation, there are also variations in the training process. Offline distillation methods involve training the teacher model separately and then using its outputs to train the student model <sup>64</sup>. On the other hand, online distillation methods train the teacher and student models simultaneously, allowing for dynamic knowledge transfer and adaptation <sup>64</sup>. Deep mutual learning is an example of online distillation, where multiple models are trained collaboratively to improve generalization ability and computational efficiency <sup>64</sup>.

Self-supervised knowledge distillation introduces knowledge distillation to self-supervised learning, aiming to enhance the performance of small models <sup>64</sup>. This approach leverages unlabeled data to learn rich representations and transfer knowledge from a pre-trained teacher model to a student model. Methods like CRD, SimCLR-v2, and SSKD have explored different ways to combine self-supervised learning with knowledge distillation, achieving promising results <sup>64</sup>.

In summary, knowledge distillation is a powerful technique for transferring knowledge from LLMs to smaller models, enabling them to perform complex tasks with improved performance. The different types of knowledge distillation, including logits-based, feature-based, and relation-based, offer flexibility in transferring different aspects of knowledge. Furthermore, variations in the training process, such as offline and online distillation, provide options for dynamic knowledge transfer and adaptation. In the next chapter, we will explore the application of knowledge distillation in enhancing the planning capabilities of LLMs.""")
    # Fixture: section node "3_2_2", attached under s11 below.  Carries three raw
    # retrieval chunks (paper text + metadata) in `reference` and a pre-generated
    # chapter body in `content`, used to exercise process_md on realistic input.
    # NOTE(review): the Chinese title/description ("rise of LLMs") does not match
    # the English content (a chapter on distance-metric-based KD) — confirm this
    # mismatch is intentional for the test fixture.
    s13 = Section(section_identifier="3_2_2", title="大型语言模型（LLMs）的兴起",
                  description='阐述了LLMs在自然语言处理领域的快速发展，以及其在各种任务中的广泛应用。',
                  reference=[
                      # Three verbatim PDF-extraction chunks: (1) MiniLLM (ICLR 2024),
                      # (2) MOKD (CVPR 2023), (3) knowledge-point quantification (IEEE
                      # TPAMI).  The garbled LaTeX below is raw extractor output and is
                      # part of the fixture — do not "fix" or reformat it.
                      {
                          'chunk_content': '# 5 Conclusion\nIn this work, we investigate the problem of distilling knowledge from larger LLMs to smaller ones. We find that the standard distillation methods that minimize the forward KLD is sub-optimal in language generation scenarios because the teacher’s output distribution contains much more modes than the student’s, and forward KLD forces the student distribution to over-estimate the low-probability regions of the teacher distribution. Therefore, we propose M INI LLM that minimizes the reverse KLD between the teacher and student distribution and develop an algorithm to optimize this objective. Extensive experiments in the instruction-following setting show that M INI LLM models produce more precise responses that have higher overall quality than standard KD approaches. We also find that M INI LLM has lower exposure bias, better calibration, and higher performance in long-text generation with good diversity.\n\n\n\n# A Derivations\n\n# A.1 Derivation of Equation 1\nWe compute the gradient of ${\\mathcal{J}}(\\theta)=\\mathrm{KL}[q_{\\theta}||p]$ with respect to $\\theta$ using the Policy Gradient Theorem [ SMSM99 ]:  \n\n$$\n\\begin{array}{r l}{\\nabla\\mathcal{I}(\\theta)=-\\nabla_{\\psi^{\\alpha}(\\cdot)}\\frac{\\mathbb{E}}{\\|\\psi(y)\\|^{\\alpha}}}&{}\\\\ {=-\\int\\nabla\\left\\{\\mu(y)|x\\right\\}\\log\\frac{p(y|x)}{\\mu(y|y)}\\Biggr]\\,\\mathrm{d}y}\\\\ &{=-\\int\\mathrm{d}\\psi(y|x)\\nabla\\log\\frac{p(y|x)}{\\mu(y|x)}\\mathrm{d}y-\\int\\mathrm{log}\\frac{p(y|x)}{\\mu(y|y)}\\nabla\\varphi(y|x)\\mathrm{d}y}\\\\ &{=\\int\\mathrm{d}\\psi\\|x)\\nabla\\log(\\psi(y|x))\\mathrm{d}y-\\int\\mathrm{eq}(y|x)\\log\\frac{p(y|x)}{\\mu(y|x)}\\nabla\\log(y|x)\\mathrm{d}y}\\\\ &{=-\\underbrace{p(y|x)}_{\\nu\\sim\\mathbb{R}(\\cdot)}(\\log\\frac{p(y|x)}{\\phi(y|x)})-1\\nabla\\log\\theta(y|x)}\\\\ 
&{=-\\underbrace{\\frac{\\mathbb{I}}{\\mathrm{d}\\psi(y|y)}}_{\\nu\\sim\\mathbb{R}(\\cdot)}\\frac{T}{\\displaystyle\\sum_{i=1}^{T}\\log\\frac{p(y|x)}{\\phi(y|y)}-x}_{\\nu\\sim\\mathbb{R}(\\cdot)}-1\\nabla\\log\\varphi(y|y)\\epsilon_{i\\cdot\\cdot}x)}\\\\ &{=-\\underbrace{p(x)}_{\\nu\\sim\\mathbb{R}(\\cdot)}\\frac{T}{\\displaystyle\\sum_{i=1}^{T}\\int\\mathrm{d}\\psi}\\frac{p(y|y_{\\varepsilon\\cdot}x,x)}{\\phi(y|y)\\epsilon_{i\\cdot}x\\cdot x}-1\\nabla\\log\\varphi(y|y_{\\varepsilon\\cdot}x,x)}\\\\ &{=-\\underbrace{p(x)}_{\\nu\\sim\\mathbb{R}(\\cdot)}\\frac{T}{\\displaystyle\\sum_{i=1}^{T}\\log\\frac{p(y|y_{\\varepsilon\\cdot}y,x)}{\\phi(y|y_{\\varepsilon\\cdot}y,x)}-1}_{\\nu\\sim\\mathbb{R}(\\cdot)}+1\\nabla\\log\\varphi(y|y_{\\varepsilon\\cdot}x,x),}\\end{array}\n$$  \n\nwhere Equation 14 is based on the fact that $\\log q_{\\theta}(y_{t}|\\pmb{y}_{<t},\\pmb{x})$ can only affect tokens at $\\geq t$ positions   \nin $\\textit{\\textbf{y}}$ . By setting $\\begin{array}{r}{R_{t}=\\sum_{t^{\\prime}=t}^{T}\\log\\frac{p\\left(y_{t^{\\prime}}|y_{<t^{\\prime}},x\\right)}{q_{\\theta}\\left(y_{t^{\\prime}}|y_{<t^{\\prime}},x\\right)}}\\end{array}$ , we obtain Equation 2 .|\n\n# A.2 Derivation of Equation 3\nTo derive Equation 3 , we first denote:  \n\n$$\n\\begin{array}{r l}&{(\\nabla\\mathcal{J})_{\\mathrm{Main}}=-\\underset{y\\sim q_{\\theta}(\\cdot\\vert x)}{\\mathbb{E}}\\sum_{t=1}^{T}R_{t+1}\\nabla\\log q_{\\theta}(y_{t}\\vert y_{<t},x),}\\\\ &{(\\nabla\\mathcal{J})_{\\mathrm{Reg}}=-\\underset{y\\sim q_{\\theta}(\\cdot\\vert x)}{\\mathbb{E}}\\left[\\displaystyle\\sum_{t=1}^{T}\\nabla\\underset{y_{t}\\sim q_{\\theta}(t)}{\\mathbb{E}}[r_{t}]\\right].}\\end{array}\n$$  \n\nThen, we re-write $\\nabla{\\mathcal{I}}(\\theta)$ as:  \n\n$$\n\\begin{array}{r 
l}&{\\nabla\\mathcal{I}(\\theta)=-\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{Z}\\|}}_{\\theta\\in\\Theta(\\mathrm{in})}\\sum_{t=1}^{r}(R_{1}-1)\\nabla\\log\\varphi(\\psi_{\\mathrm{i}}\\|_{\\mathcal{H}^{c_{1},\\,,x}})}\\\\ &{\\quad=-\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{Z}\\|}}_{\\theta\\in\\Theta(\\mathrm{in})}\\sum_{t=1}^{r}R_{1}+\\nabla\\log\\varphi(\\psi_{\\mathrm{i}}\\|_{\\mathcal{H}^{c_{1},\\,,x}})}\\\\ &{\\quad\\quad-\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{Z}\\|}}_{\\theta\\in\\Theta(\\mathrm{in})}\\sum_{t=1}^{r}\\left(\\log\\frac{p(|\\psi_{\\mathrm{i}}||_{\\mathcal{H}^{c_{1},\\,,x}})}{\\psi(|\\psi_{\\mathrm{i}}||_{\\mathcal{H}^{c_{1},\\,,x}})}-1\\right)\\nabla\\log\\varphi(\\psi_{\\mathrm{i}}\\|_{\\mathcal{H}^{c_{1},\\,,x}})}\\\\ &{\\quad=(\\nabla\\mathcal{I})\\operatorname*{sus}-\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{Z}\\|}}_{\\theta\\in\\Theta(\\mathrm{in})}\\sum_{t=1}^{r}\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{H}^{c_{1},\\,\\theta}\\|_{\\mathcal{H}^{c_{1},\\,\\theta}}}}_{\\theta\\in\\Theta(\\mathrm{in})}\\left(\\log\\frac{p(|\\psi_{\\mathrm{i}}||_{\\mathcal{H}^{c_{1},\\,\\,x}})}{\\psi(|\\psi_{\\mathrm{i}}||_{\\mathcal{H}^{c_{1},\\,,x}})}-1\\right)\\nabla\\log\\varphi(\\psi_{\\mathrm{i}}|_{\\mathcal{H}^{c_{1},\\,,x}})}\\\\ &{\\quad=(\\nabla\\mathcal{I})\\operatorname*{sus}-\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{Z}\\|}}_{\\theta\\in\\Theta(\\mathrm{in})}\\sum_{t=1}^{r}\\underbrace{\\mathbb{E}\\frac{\\|\\mathcal{Z}\\|}{\\|\\mathcal{H}^{c_{1},\\,\n$$  \n\nwhere Equation 20 uses the product rule of the gradient and $\\begin{array}{r}{r_{t}=\\log\\frac{p\\left(y_{t}|y_{<t},\\pmb{x}\\right)}{q_{\\theta}\\left(y_{t}|y_{<t},\\pmb{x}\\right)}}\\end{array}$ .|  \n\n  \nFigure 8: The prompt wrapper for training and evaluation.\n\n# BExperimental Details\n\n# B.1 Training Details\nBaselines 
For models with less than 1.3B parameters, we search for the learning rates in [5e-4, 1e-4, 5e-5], the batch sizes in [32, 64], and train these models for 20 epochs. For other models, we search for the learning rate in [5e-5, 1e-5, 5e-6], the batch sizes in [32, 64], and train these models for 10 epochs. For KD, we follow $[\\mathrm{SST}^{+}20]$ to mix the distillation loss with the language modeling loss on the ground truth responses by a mixture rate of 0.5. The checkpoints of each baseline are selected by the Rouge-L scores on the validation set.  \n\nMINI LLM As shown in Algorithm 2.3 , we first fine-tune the model on the training set using the vanilla language modeling objective to get a starting point of the subsequent M INI LLM training. We fine-tune the model for 3 epochs using the best learning rate and batch size of the corresponding SFT baselines. We select the checkpoint with the lowest validation loss, not the Rouge-L score. Then, we train the model as described in Algorithm 2.3 using a learning rate 5e-6, a mini-batch size 64 in all cases. Similar to PPO $[\\mathrm{SWD}^{+}17]$ , we collect 256 sentences at once and adopt 4 inner epochs. The clipping rate $\\epsilon$ is set to 0.2, and the max length of the model is 512. We use temperature $=1$ when sampling from $q\\theta$ . We train the model for at most 5000 steps and select the final checkpoint using the Rouge$_\\mathrm{L}$ score on the validation set. Our experiments are based on the NVIDIA V100 32G GPUs.\n\n# B.2 Evaluation Details\nDuring the evaluation, we sample the responses from each model using temperature $=1$ , a max-length limit of 512, and random seeds [10, 20, 30, 40, 50]. Similar to $[\\mathrm{TGZ}^{+}23]$ , we adopt a prompt wrapper shown in Figure 8 to convert each instruction-response pair to a sentence. For the GPT-4 feedback, we apply the prompt in Figure 9 and set the temperature $=0.7$ . 
For the classification tasks in the calibration paragraph of Section 3.3 , we prompt the model to do zero-shot text classification with the prompt in Figure 10 and 11 .\n\n# B.3 Exposure Bias Analysis\nFollowing [ AEABC22 ], we compute the ExAccErr with the following formula:  \n\n$$\n\\mathrm{ExAccErr}(l)=\\frac{R(l)-l\\epsilon(l)}{l\\epsilon(l)}\\times100\\%,\n$$  \n\n$R(l)$ is the accumulated regret of imitating the teacher distribution $p$ at the time step $l$ during the free-run generation:  \n\n$$\nR(l)=\\sum_{t=1}^{T}\\underset{\\underset{y_{t}\\sim q_{\\theta}(\\cdot\\vert y_{<t},x)}{y_{<t}\\sim q_{\\theta}(\\cdot\\vert x)}}{\\mathbb{E}}\\log\\frac{p(y_{t}\\vert y_{<t},x)}{q_{\\theta}(y_{t}\\vert y_{<t},x)},\n$$  \n\nWe would like to request your feedback on the performance of two AI assistants in response to the user instruction and input displayed above.   \nPlease rate the helpfulness, relevance, accuracy, and level of detail of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.   \nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.   \nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment. Below is an instruction that describes a task.   \nWrite a response that appropriately completes the request.  \n\n  \nFigure 9: GPT-4 evaluation prompt.   \nFigure 10: Zero-shot text classification prompt for SST2.   \nFigure 11: Zero-shot text classification prompt for BoolQ.  
\n\n### Input:  \n\n### Response:  \n\n<html><body><table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td colspan="2">DollyEval</td><td colspan="2">SelfInst</td><td colspan="2">VicunaEval</td><td colspan="2">S-NI</td><td rowspan="2">UnNI R-L</td></tr><tr><td>GPT4</td><td>R-L</td><td>GPT4</td><td>R-L</td><td>GPT4</td><td>R-L</td><td>R-L 28.0</td></tr><tr><td>GPT-J-6B</td><td>Teacher SFT w/o KD</td><td>65.8 50.7</td><td>27.3</td><td>57.4</td><td>17.3</td><td>55.8 43.1</td><td>17.4 16.1</td><td>— 21.5</td><td></td><td>33.6 27.1</td></tr><tr><td rowspan="4">GPT-2-760M</td><td>KD</td><td>51.6</td><td>25.4 26.7</td><td>38.3 38.9</td><td>12.4 13.4</td><td>43.4</td><td>16.4</td><td></td><td>25.9</td><td>33.2</td></tr><tr><td></td><td>51.4</td><td>26.0</td><td>39.2</td><td>14.0</td><td>42.0</td><td>15.3</td><td></td><td>25.5</td><td>32.5</td></tr><tr><td>SeqKD MINILLM</td><td>54.0</td><td>25.8</td><td>43.7</td><td>16.3</td><td>44.3</td><td>19.1*</td><td></td><td>27.1</td><td>35.5*</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td rowspan="4">GPT-2-1.5B</td><td>SFT w/o KD</td><td>58.4</td><td>27.6*</td><td>42.9</td><td>14.3</td><td></td><td>48.6</td><td>16.3 16.5</td><td>27.6</td><td>34.6*</td></tr><tr><td>KD</td><td>56.5</td><td>26.6</td><td>46.0</td><td>14.5</td><td>47.2</td><td></td><td></td><td>27.6</td><td>34.9*</td></tr><tr><td>SeqKD</td><td>58.5</td><td>27.0</td><td>43.2</td><td>13.6</td><td>46.6</td><td>16.9</td><td></td><td>28.0</td><td>34.2*</td></tr><tr><td>MINILLM</td><td>59.6</td><td>25.9</td><td>48.5</td><td>16.6</td><td>48.9</td><td>19.4*</td><td>28.5*</td><td></td><td>35.9*</td></tr><tr><td rowspan="4">GPT-Ne0-2.7B</td><td>SFT w/o 
KD</td><td>60.7</td><td>26.8</td><td>45.4</td><td>15.8</td><td></td><td>51.5</td><td>17.0</td><td>26.5</td><td>31.6</td></tr><tr><td>KD</td><td>61.5</td><td>26.7</td><td>47.0</td><td>16.0</td><td>52.1</td><td></td><td>16.9</td><td>27.2</td><td>32.7</td></tr><tr><td>SeqKD</td><td>60.8</td><td>25.6</td><td>47.2</td><td>16.2</td><td>53.0</td><td>16.9</td><td></td><td>26.1</td><td>32.9</td></tr><tr><td>MINILLM</td><td>63.4</td><td>28.5*</td><td>52.5</td><td>17.1</td><td>54.1</td><td>18.6*</td><td></td><td>29.8*</td><td>35.4*</td></tr></table></body></html>  \n\nTable 6: Evaluation results when GPT-J is the teacher. GPT4 and $\\mathbf{R}{\\mathrm{-}}\\mathbf{L}$ stand for the average GPT-4 feedback scores and Rouge-L scores across 5 random seeds. The best scores of each model size are boldfaced , and the scores where the student model outperforms the teacher are marked with \\*.  \n\nand $\\epsilon(l)$ is the average per-step error between $q_{\\theta}$ and $p$ using the oracle context sampled from $p$ as the prefix:  \n\n$$\n\\epsilon(l)=\\frac{1}{l}\\sum_{t=1}^{T}\\underset{\\underset{y_{t}\\sim q_{\\theta}(\\cdot|y_{<t},x)}{y_{<t}\\sim p(\\cdot|x)}}{\\mathbb{E}}\\log\\frac{p(y_{t}|y_{<t},x)}{q_{\\theta}(y_{t}|y_{<t},x)}.\n$$  \n\nIntuitively, the regret of $q_{\\theta}$ during generation is made of two parts: the error to estimate $p$ given the oracle context and the error caused by the low-quality model-generated prefix. The former is calculated by $l\\epsilon(l)$ , and the latter reflects the exposure bias. Therefore, ExAccErr measures the relative error caused only by exposure bias.',
                          'paper_title': 'MiniLLM: Knowledge Distillation of Large Language Models',
                          'source_name': 'ICLR', 'year': '2024', 'chunk_id': 5,
                          'chunk_ext_id': 454845826125300826},
                      {
                          'chunk_content': '# 2. Related Works\n\n# 2.1. Knowledge Distillation\nKnowledge distillation [ 24 ] aims to distill knowledge from a larger teacher model to a smaller student model to improve the performance of the student model. Many studies have been proposed in recent years, which can be divided into three groups, i.e., logits-based, feature-based, and relation-based, according to the knowledge types.  \n\nLogits-based [ 24 ,36 ] knowledge distillation utilizes the logits of the teacher model as the knowledge. In the vanilla knowledge distillation [ 24 ], the student model mimics the logits of the teacher model by minimizing the KL-divergence of the class distribution. Feature-based methods [ 7 ,39 ,40 ]utilize the output of intermediate layers, i.e., feature maps, as the knowledge to supervise the training of the student model. Relation-based knowledge distillation [ 38 ,59 ] distills the relation between samples rather than a single instance.  \n\nThese methods mentioned above perform offline distillation. Some studies [ 6 ,19 ,56 ,57 ,60 ] are developed to perform online distillation, i.e., the teacher and the student model are trained simultaneously. Deep mutual learning [ 57 ] is first proposed to train multiple models collaboratively. After that, studies are proposed to improve deep mutual learning regarding generalization ability [ 6 ,19 ] and computation effi- ciency [ 60 ]. All these methods are trained in a supervised manner.\n\n# 2.2. Self-Supervised Knowledge Distillation\nDue to significant improvement for small models, knowledge distillation is introduced to self-supervised learning to improve the performance of small models. CRD [ 44 ]combines a contrastive loss with knowledge distillation to transfer the structural knowledge of the teacher model. 
SimCLR-v2 [ 9 ] proposes to train a larger model via selfsupervised learning first and uses the supervised finetuned large model to distill a smaller model via self-supervised learning. SSKD [ 52 ] combines self-supervised learning with supervised learning to transfer richer knowledge. Compress [ 1 ] and SEED [ 15 ] transfer the knowledge of probability distribution in a self-supervised manner by utilizing the memory bank in MoCo [ 21 ]. SimReg [ 37 ] directly conducts feature distillation by minimizing the squared Euclidean distance between the features of the teacher and student. While ReKD [ 58 ] transfers the relation knowledge to the student. DisCo [ 16 ] proposes to transfer the final embeddings of a self-supervised pre-trained teacher. There is a limitation in these SSL-KD methods, i.e., knowledge is distilled to a student model from a static teacher model in a unidirectional way. The teacher model cannot absorb knowledge from the student model. Recently, DoGo [ 3 ] and MCL [ 53 ] combined MoCo [ 21 ] with mutual learning [ 57 ] for online SSL-KD. However, they either lack a direct comparison with SSLKD methods on mainstream backbones and tasks or can’t guarantee the performance of larger models.\n\n# 3. Methods\nIn this section, we first introduce the overall architecture of MOKD in Sec. 3.1 . Then, the two distillation modes of MOKD, i.e., self-distillation and cross-distillation, are introduced in Sec. 3.2 and Sec. 3.3 , respectively. Finally, the training procedure and implementation details are introduced in Sec. 3.4 .  \n\n  \nFigure 2. The overall architecture of MOKD. In MOKD, two different models (model1 and model2) are trained collaboratively in a self-supervised manner. There are two types of knowledge distillation modes: self-distillation and cross-distillation modes. And the distillation procedure is performed in two feature spaces projected by two types of projection heads, i.e., (a) MLP-Head and (b) T-Head. 
“sg" denotes the stop-gradient operation.\n\n# 3.1. Overall Architecture\nThe overall architecture of MOKD is shown in Fig. 2 .In MOKD, two different models $\\pmb{f}_{i}~(i=1,2)$ are trained collaboratively in a self-supervised manner. There are two knowledge distillation modes: self-distillation and crossdistillation modes. In each model, a multi-layer-perceptron head (MLP-Head) (Fig. 2 (a)) and a Transformer head (THead) (Fig. 2 (b)) are employed to project the feature representations $Z$ produced by the encoders to the output embeddings $\\mathbf{\\nabla}m$ and $\\pmb{t}$ for self-distillation and cross-distillation. Here, the T-Head, which consists of several Transformer blocks, is designed to enhance the semantic alignment between the two models. Self-distillation, which is conducted between each model $\\pmb{f}_{i}$ (as a student) and its EMA version model $\\pmb{f}_{i}^{\\prime}$ (as a teacher), performs self-supervised learning for each model independently. The self-distillation losses are $\\mathcal{L}_{s m i}$ and $\\mathcal{L}_{s t i}$ for the MLP-Head and T-Head, respectively, which will be introduced in Sec. 3.2 . While cross-distillation, which is conducted between the two models, is employed for knowledge interaction between the two models. In crossdistillation, by utilizing the self-attention mechanism of the T-Head, we design a cross-attention feature search strategy to enhance semantic alignment between different models. The cross-distillation losses are $\\mathcal{L}_{c m i}$ and $\\mathcal{L}_{c t i}$ for the MLPHead and T-Head, respectively, which will be introduced in Sec. 3.3 . Here, the subscript $s$ and $c$ stand for self-distillation and cross-distillation, respectively. And the subscript $m$ and $t$ stand for MLP-Head and T-Head, respectively.',
                          'paper_title': 'Multi-Mode Online Knowledge Distillation for Self-Supervised Visual Representation Learning.',
                          'source_name': 'CVPR', 'year': '2023', 'chunk_id': 1,
                          'chunk_ext_id': 454847553378843868},
                      {
                          'chunk_content': '# 3 A LGORITHM\nIn this study, we explain the success of knowledge distillation from a new perspective, i.e. quantifying knowledge points encoded in the intermediate layers of a DNN for classification. In contrast to previous explanations of knowledge distillation, our method enables people to explain the representation power of a DNN using the quantity and the quality of knowledge points encoded in a DNN, which reflects a more direct and stronger connection between the knowledge and performance. To this end, we measure the information in each input unit discarded by the DNN to define knowledge points, based on the information theory. We propose three hypotheses on the mechanism of knowledge distillation to explain its success. To verify these hypotheses, we further design three types of metrics based on the quantification of knowledge points, which evaluate the representation quality of the DNN. Note that theoretically, knowledge distillation is generally considered to involve two fully different utilities. Distillation from highdimensional intermediate-layer features usually exhibits the first type of utility, i.e., forcing the student network to mimic knowledge points of the teacher network. In contrast, distilling from relatively lowdimensional network output often exhibits the second type of utility, i.e., selecting confident samples for learning and ignoring unconfident samples. In this paper, we mainly explain the first utility of knowledge distillation, which is mainly exhibited by distilling from high-dimensional intermediate-layer features.',
                          'paper_title': 'Quantifying the Knowledge in a DNN to Explain Knowledge Distillation for Classification',
                          'source_name': 'IEEE Transactions on Pattern Analysis and Machine Intelligence',
                          'year': None, 'chunk_id': 3, 'chunk_ext_id': 454845524636079026}],
                  # Pre-generated chapter body: English prose with <sup>n</sup>
                  # citation markers, as emitted by a previous generation pass.
                  content="""### Chapter 3.2: Knowledge Distillation Based on Distance Metrics

In this chapter, we will delve into the methods of knowledge distillation based on distance metrics, a pivotal approach for enhancing the planning capabilities of Large Language Models (LLMs). As mentioned earlier, the previous chapter explored the foundational aspects of LLMs' planning capabilities, including their architecture, pre-training and fine-tuning processes, and evaluation methodologies. Building upon this foundation, we now turn our attention to how distance metrics, such as Kullback-Leibler (KL) divergence, Jensen-Shannon (JS) divergence, and Total Variation Distance (TVD), are employed in knowledge distillation to refine LLMs' planning abilities.

Knowledge distillation, as a technique for transferring knowledge from a larger teacher model to a smaller student model, has garnered significant attention for its potential to improve the performance of smaller models <sup>64</sup>. This process typically involves the student model mimicking the behavior of the teacher model by minimizing the divergence between their output distributions. Traditional methods often rely on minimizing the forward KL divergence, where the student model's distribution is aligned with that of the teacher. However, in language generation scenarios, this approach can be sub-optimal due to the teacher's output distribution containing more modes than the student's, leading to a loss of generative diversity and precision <sup>65</sup>.

To address this limitation, innovative approaches such as MINI LLM have been proposed, which focus on minimizing the reverse KL divergence between the teacher and student distributions. This method aims to ensure that the teacher's distribution covers all the modes present in the student's distribution, thereby enhancing the quality and diversity of the generated text <sup>65</sup>. The algorithm developed for optimizing the reverse KL divergence objective has shown promising results in instruction-following settings, producing more precise responses with higher overall quality compared to standard knowledge distillation approaches <sup>65</sup>.

The success of knowledge distillation can be further understood by quantifying the knowledge points encoded in the intermediate layers of a Deep Neural Network (DNN). These knowledge points, defined based on the information discarded by the DNN using information theory, provide a direct connection between the knowledge transferred and the performance of the student model <sup>59</sup>. This perspective highlights two distinct utilities of knowledge distillation: forcing the student network to mimic the teacher network's knowledge points and selecting confident samples for learning <sup>59</sup>.

In the context of distance metrics, KL divergence is a commonly used measure for evaluating the difference between two probability distributions. By minimizing the KL divergence, the student model can effectively learn the probabilistic outputs of the teacher model, thereby improving its predictive performance <sup>64</sup>. However, KL divergence is not without its limitations, particularly in scenarios where the modes of the distributions differ significantly. This is where JS divergence and TVD come into play. JS divergence, a symmetrized and smoothed version of KL divergence, provides a more balanced measure of the difference between distributions, making it suitable for cases where the mode discrepancy is pronounced <sup>64</sup>. TVD, on the other hand, measures the absolute difference between the probabilities assigned by the two distributions, offering a straightforward and intuitive metric for alignment <sup>64</sup>.

Extensive experiments have demonstrated that models trained using reverse KL divergence, such as MINI LLM, exhibit lower exposure bias, better calibration, and higher performance in long-text generation with good diversity <sup>65</sup>. These models outperform other methods across various metrics, including GPT-4 feedback scores and Rouge-L scores, underscoring the efficacy of distance-based knowledge distillation in enhancing LLMs' planning capabilities <sup>65</sup>.

In summary, knowledge distillation based on distance metrics represents a robust strategy for improving the planning abilities of LLMs. By leveraging metrics such as KL divergence, JS divergence, and TVD, researchers can effectively transfer knowledge from larger teacher models to smaller student models, thereby enhancing their performance in complex tasks. The insights gained from these methods not only contribute to the advancement of LLMs but also pave the way for future innovations in model compression and knowledge transfer. In the next chapter, we will explore other methodologies for enhancing LLMs' planning capabilities, including world models and adversarial training, providing a comprehensive overview of the current state-of-the-art techniques.""")

    # Assemble the outline tree used by the smoke test.  Shape (r is the root,
    # s1..s13 are the Section fixtures defined above):
    #   r ── s1 ── (s2, s3)
    #     ── s4 ── s5
    #     ── s7 ── s8  ── (s9, s10)
    #           ── s11 ── (s12, s13)
    t.add_node(r)
    t.add_node(s1, r)
    t.add_node(s2, s1)
    t.add_node(s3, s1)
    t.add_node(s4, r)
    t.add_node(s5, s4)
    # t.add_node(s6, s4)  # NOTE(review): s6 is excluded — presumably intentional; confirm
    t.add_node(s7, r)
    t.add_node(s8, s7)
    t.add_node(s9, s8)
    t.add_node(s10, s8)
    t.add_node(s11, s7)
    t.add_node(s12, s11)
    t.add_node(s13, s11)
    # SECURITY: an API token was previously hard-coded on this line.  A credential
    # committed to source control must be treated as leaked and rotated.  Read it
    # from the environment instead (set GENERATION_AGENT_TOKEN before running).
    import os
    token = os.environ.get("GENERATION_AGENT_TOKEN", "")
    generationAgent_instance = GenerationAgent(token)
    # process_md is given 'review1.md' as both input and output path — presumably
    # it fills the markdown in place using the outline tree t; confirm against
    # the process_md implementation.
    asyncio.run(generationAgent_instance.process_md('review1.md', t, 'review1.md'))
    print('测试完毕')
