diff --git "a/20240921/2401.08326v3.json" "b/20240921/2401.08326v3.json" new file mode 100644--- /dev/null +++ "b/20240921/2401.08326v3.json" @@ -0,0 +1,534 @@ +{ + "title": "RoTBench: A Multi-Level Benchmark for Evaluating the Robustness of Large Language Models in Tool Learning", + "abstract": "Tool learning has generated widespread interest as a vital means of interaction between Large Language Models (LLMs) and the physical world. Current research predominantly emphasizes LLMs\u2019 capacity to utilize tools in well-structured environments while overlooking their stability when confronted with the inevitable noise of the real world.\nTo bridge this gap, we introduce RoTBench, a multi-level benchmark for evaluating the robustness of LLMs in tool learning. Specifically, we establish five external environments, each featuring varying levels of noise (i.e., Clean, Slight, Medium, Heavy, and Union), providing an in-depth analysis of the model\u2019s resilience across three critical phases: tool selection, parameter identification, and content filling.\nExperiments involving six widely-used models underscore the urgent necessity for enhancing the robustness of LLMs in tool learning.\nFor instance, the performance of GPT-4 even drops significantly from 80.00 to 58.10 when there is no substantial change in manual accuracy.\nMore surprisingly, the noise correction capability inherent in the GPT family paradoxically impedes its adaptability in the face of mild noise.\nIn light of these findings, we propose RoTTuning, a strategy that enriches the diversity of training environments to bolster the robustness of LLMs in tool learning.\nThe code and data are available at https://github.com/Junjie-Ye/RoTBench.", + "sections": [ + { + "section_id": "1", + "parent_section_id": null, + "section_name": "Introduction", + "text": "Tool learning has emerged as a critical concept for empowering large language models (LLMs) Brown et al. (2020 ###reference_b3###); Bai et al. (2022 ###reference_b1###); Touvron et al. (2023a ###reference_b22###) to interact with the real world Yang et al. (2023 ###reference_b27###); Mialon et al. (2023 ###reference_b13###); Qin et al. (2023a ###reference_b15###); Ye et al. (2024b ###reference_b31###). In this context, the external environment of an LLM contains an ensemble of integrated tools. Each tool is uniquely identified by its name and is described by a succinct paragraph that explains its functionality. Similarly, every parameter within these tools is characterized by its name, along with a description that clarifies its purpose, its optionality, and other pertinent details.\n###figure_1### Recent research has centered on examining how well LLMs can effectively employ tools within a carefully designed and stable environment. From one perspective, specific studies have scrutinized the outcomes of LLMs\u2019 tool usage, verifying both the accuracy of tool selection and the efficacy of the generated responses Qin et al. (2023b ###reference_b16###); Huang et al. (2023 ###reference_b11###). This analysis involved evaluating the relevance of the selected tools and the final responses in fulfilling users\u2019 requirements. On the other hand, other investigations have delved into the intricate process of tool utilization by LLMs, striving for a more comprehensive assessment of their performance in tool learning Chen et al. (2023d ###reference_b7###); Ye et al. (2024a ###reference_b30###). 
This includes an analysis of the diverse capabilities necessary for LLMs to excel in tool learning while also identifying any limitations they may have in this regard.\nHowever, these studies fail to account for the robustness of LLMs in the face of inevitable noise in real-world scenarios Chen et al. (2023b ###reference_b5###); Liu et al. (2023 ###reference_b12###).\nUsing Figure 1 ###reference_### as a reference, LLMs recognize the tool for querying weather information when named \u201cGet_Weather,\u201d but not when named \u201cABC,\u201d despite the tool\u2019s functionality remaining unaffected by its name.\nConsequently, it becomes imperative to investigate whether LLMs can proficiently identify these tools and configure parameters to meet user needs in noisy real-world environments. This research is essential to guarantee their reliability in practical applications.\nTo fill this gap, we introduce RoTBench, a multi-level benchmark for evaluating the robustness of LLMs in tool learning. Specifically, we establish five external environments, which can be categorized as Clean, Slight, Medium, Heavy, and Union in ascending order of noise levels. By evaluating the performance of LLMs across three critical stages: tool selection, parameter identification, and content filling, we aim to offer a thorough and intricate analysis of the stability and reliability of LLMs in tool utilization.\nThrough experiments conducted on six widely-used LLMs, we observe that the performance of these models is remarkably sensitive to noise. For instance, the performance of GPT-4 even drops significantly from 80.00 to 58.10 when there is no substantial change in manual accuracy. This underscores the pressing requirement to enhance the robustness of LLMs in tool learning. Interestingly, the GPT family of models\u2019 inherent noise correction capability appears to hinder its performance in mildly noisy environments.\nIn light of these findings, we introduce RoTTuning, a technique aimed at augmenting the adaptability of LLMs to a wide range of environments by introducing greater environmental diversity during the training phase. Our experimental results demonstrate that our approach yields an average performance improvement of 16.10 points across diverse environments.\nThe main contributions of our work are summarized as follows:\nWe introduce RoTBench, a benchmark designed to evaluate the robustness of LLMs in tool learning. This benchmark contains five environments with different levels of noise, enabling a comprehensive evaluation of robustness throughout three pivotal phases of model tool learning.\nThe experimental analyses conducted on six widely-used models underscore the imperative of improving the robustness of LLMs in tool learning. These analyses also reveal conflicts between the inherent capabilities of the models and their robustness.\nWe introduce RoTTuning, a training method for tool learning that focuses on augmenting environmental diversity. Our experiments demonstrate that this approach can effectively enhance LLMs robustness." + }, + { + "section_id": "2", + "parent_section_id": null, + "section_name": "Related Work", + "text": "" + }, + { + "section_id": "3", + "parent_section_id": null, + "section_name": "RoTBench", + "text": "As depicted in Figure 2 ###reference_###, RoTBench encompasses five environments, each characterized by varying levels of noise, facilitating a thorough evaluation of the robustness of LLMs throughout the three stages of tool usage." 
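To make the setup concrete, the sketch below shows what a tool document of the kind described in the Introduction might look like in the Clean-level environment, together with a noisy variant in which the tool name and a parameter name have been replaced (as in Figure 1). The tool, its fields, and the query are illustrative assumptions, not actual RoTBench data.

```python
# Hypothetical tool document exposed to the LLM in a Clean-level environment.
clean_tool = {
    "name": "Get_Weather",  # the name is only a label; functionality is defined by the description
    "description": "Query the current weather for a given city.",
    "parameters": {
        "city": {"type": "string", "description": "Name of the city to query.", "required": True},
        "unit": {"type": "string", "description": "Temperature unit, celsius or fahrenheit.", "required": False},
    },
}

# The same tool under noise: the tool name and one parameter name are replaced with
# uninformative strings, while the descriptions (and hence the functionality) are unchanged.
noisy_tool = {
    "name": "ABC",
    "description": "Query the current weather for a given city.",
    "parameters": {
        "xq1": {"type": "string", "description": "Name of the city to query.", "required": True},
        "unit": {"type": "string", "description": "Temperature unit, celsius or fahrenheit.", "required": False},
    },
}

# A robust model answering "What is the weather in Paris today?" should still select this
# tool and fill {"xq1": "Paris"}, since only the labels changed, not the semantics.
```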
+ }, + { + "section_id": "3.1", + "parent_section_id": "3", + "section_name": "Data Collection", + "text": "In order to thoroughly cater to real-world requirements and encompass commonly utilized tools, we utilize ToolEyes Ye et al. (2024a ###reference_b30###), an evaluation system designed for tool learning. This system defines seven real-world application scenarios.\nWithin each of these scenarios, we randomly select 15 user queries for analysis. Since the raw data offers tool information without standardized invocation paths, we have manually labeled these paths to facilitate the evaluation process. Detailed statistics of the data can be found in Table 1 ###reference_###." + }, + { + "section_id": "3.2", + "parent_section_id": "3", + "section_name": "Environments Construction", + "text": "To comprehensively assess the resilience of LLMs in tool learning, we reference the hierarchical classification of noise in previous studies Wang et al. (2021 ###reference_b25###); Zhu et al. (2023 ###reference_b33###); Dong et al. (2023 ###reference_b8###) and design five distinct external environments. These environments feature varying noise levels that affect both the tool and its parameters.\nClean-level environment employs a runtime framework developed by ToolEyes. This framework furnishes essential information to LLMs for comprehending tools, where the name of each tool epitomizes its functionality and the names of parameters signify their respective meanings. This environment comprises a total of 105 test cases. The remaining four environments are derivatives of this primary environment, each modified by incorporating distinct levels of noise.\nSlight-level environment encompasses three types of noise: insertion, omission, and substitution. These correspond to real-world occurrences such as an excess of characters, missing characters, and character errors when naming tools or parameters.\nSpecifically, we introduce noise in the following ways:\n1) We randomly select half of the available tools within the environment. For these selected tools, a random form of noise is applied, altering up to 1/3 of the characters,\nresulting in the creation of 105 new data points.\n2) For each tool, we randomly select half of the parameters and introduce noise into their names using the method described above,\ngenerating an additional 105 new data entries.\nBy combining these two approaches, we create a Slight-level environmental test set consisting of 210 test cases.\nMedium-level environment introduces two types of noise: reversal and nonsense. These mirror real-world scenarios where names are reversed or replaced with random strings, rendering the information meaningless.\nTo apply noise, we follow these procedures:\n1) We randomly select half of the available tools. For these tools, there is a 50% probability that their names will be substituted with random strings, each containing up to 10 characters. Additionally, there is a 50% chance that the names of these tools will be reversed. This process yields 105 test cases.\n2) For each tool, half of the parameters are randomly chosen. These parameters may undergo a 50% chance of having their names substituted with random strings, each containing up to 5 characters, or a 50% chance of being reversed. This leads to 105 test cases.\nIt is worth noting that if the reversal process does not alter the name, it will be replaced with a random string. 
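The character-level perturbations used for the Slight- and Medium-level environments can be sketched as follows; the function names and exact sampling details are illustrative assumptions rather than the benchmark's reference implementation.

```python
import random
import string

def slight_noise(name: str) -> str:
    """Slight-level noise: insertion, omission, or substitution on up to 1/3 of the characters."""
    chars = list(name)
    op = random.choice(["insert", "omit", "substitute"])
    n_edits = random.randint(1, max(1, len(chars) // 3))
    for _ in range(n_edits):
        pos = random.randrange(len(chars))
        if op == "insert":
            chars.insert(pos, random.choice(string.ascii_letters))
        elif op == "omit" and len(chars) > 1:
            chars.pop(pos)
        else:
            chars[pos] = random.choice(string.ascii_letters)
    return "".join(chars)

def medium_noise(name: str, max_random_len: int = 10) -> str:
    """Medium-level noise: reverse the name, or replace it with a meaningless random string."""
    if random.random() < 0.5:
        reversed_name = name[::-1]
        if reversed_name != name:  # if reversal leaves the name unchanged, fall back to a random string
            return reversed_name
    length = random.randint(1, max_random_len)
    return "".join(random.choices(string.ascii_lowercase, k=length))
```

For parameter names, the same functions apply with a smaller random-string budget (up to 5 characters), matching the description above.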
Consequently, we have successfully generated 210 test cases for the Medium-level environment.\nHeavy-level environment encompasses two disruptive types of noise: exchange and addendum, reflecting real-world occurrences of name swapping and information supplementation.\nNoise is introduced as follows:\n1) All tool names within the environment are randomly shuffled. This shuffling disrupts the association between a tool\u2019s name and its functional description, challenging LLMs to accurately comprehend the tool\u2019s function despite the disorganized name. This process yields 105 test cases.\n2) Half of the tools are randomly chosen, and a new mandatory parameter is introduced with a 50% probability. This parameter is given a name consisting of a random string of up to 5 characters. LLMs are tasked with providing a specific string of up to 3 characters for the parameter based on its descriptive meaning. The names of these parameters are randomly shuffled with a 50% probability. For tools with fewer than two parameters, noise is introduced by directly adding new parameters. This process also results in 105 test cases.\nIn total, 210 Heavy-level environmental test cases have been generated.\nUnion-level environment encompasses all previously mentioned noise categories. Given that the prior noise environments already include noise for both tools and parameters, we randomly choose one noise generation method that impacts tool names and another method that affects parameters from the three previous environment levels. These selected methods are simultaneously applied to generate 105 test cases where both tool names and parameters are subjected to noise injection." + }, + { + "section_id": "3.3", + "parent_section_id": "3", + "section_name": "Staged Evaluation", + "text": "We evaluate the robustness performance of LLMs at each of stages in tool learning and analyze their respective variations.\nTool selection marks the initial phase of tool usage by LLMs. During this process, LLMs identify suitable tools for addressing the user\u2019s query by interpreting the functional descriptions offered by the external environment and subsequently output the names of these tools. It should be emphasized that the name of the tool is essentially a label; the practical deployment of the tool is governed by its functional description.\nIn evaluating a test case, the score for its tool selection is defined as follows:\nHere, equals 1 if the condition is true, and 0 otherwise. In this context, represents the tool chosen by the LLMs, while denotes the tool that needs to be selected.\nParameter identification involves recognizing the required parameters and outputting their respective names based on their specified needs, following the selection of the appropriate tool. This process necessitates choosing the mandatory parameters, while the optional ones are selected based on actual requirements. Similar to tool selection, the name of the parameter serves as an identifier; however, it is the description of the parameter that truly defines its meaning.\nFor each given test case, its parameter identification score is defined as follows:\nIn this equation, denotes the set of parameters identified by LLMs, and represents the set of parameters that should be identified.\nContent filling constitutes the concluding phase in the tool usage process. 
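Taken together, the stage descriptions in this subsection (the content-filling stage continues just below) admit a compact exact-match formalization. The notation is a plausible reconstruction consistent with the prose rather than the paper's original symbols, and the product form of the content-filling score (every parameter must be filled correctly) is an assumption.

```latex
S_{\mathrm{ts}} = \mathbb{I}\left(t = t^{*}\right), \qquad
S_{\mathrm{pi}} = \mathbb{I}\left(P = P^{*}\right), \qquad
S_{\mathrm{cf}} = \prod_{i=1}^{n} \mathbb{I}\left(c_{i} = c_{i}^{*}\right)
```

Here \(\mathbb{I}(\cdot)\) equals 1 if its condition holds and 0 otherwise; \(t\) and \(t^{*}\) are the selected and reference tools, \(P\) and \(P^{*}\) the identified and reference parameter sets, \(n\) the number of parameters to fill, and \(c_{i}\), \(c_{i}^{*}\) the filled and reference contents of the \(i\)-th parameter.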
Once the tool and its corresponding parameters have been selected, LLMs are tasked with breaking down the user-provided information for populating the content of these parameters. Upon accomplishing this step, LLMs formally conclude the entire tool usage cycle, paving the way to receive the tool\u2019s output phase and initiate a new interaction.\nFor each test case, we define a content filling score as follows:\nHere, represents the total number of parameters required to be filled. is the content filled by LLMs for the th parameter, and refers to the correct content for that parameter." + }, + { + "section_id": "4", + "parent_section_id": null, + "section_name": "Experiments", + "text": "" + }, + { + "section_id": "4.1", + "parent_section_id": "4", + "section_name": "Model Selection", + "text": "To evaluate the robustness of widely-used LLMs with tool-use capabilities, we opt for testing four open-source models (i.e., ToolLLaMA-2-7B-v1 Qin et al. (2023b ###reference_b16###), ToolLLaMA-2-7B-v2 Qin et al. (2023b ###reference_b16###), NexusRaven-13B-v1 team (2023a ###reference_b20###), NexusRaven-13B-v2 team (2023b ###reference_b21###)) and two closed-source models (i.e., GPT-3.5-turbo111https://platform.openai.com/docs/models/gpt-3-5 ###reference_t-3-5###, GPT-4 OpenAI (2023 ###reference_b14###)).222The details of LLMs can be found in Appendix A ###reference_###." + }, + { + "section_id": "4.2", + "parent_section_id": "4", + "section_name": "Main Results", + "text": "As tool learning involves multiple turns of interaction between LLMs and the environment Qin et al. (2023a ###reference_b15###); Ye et al. (2024a ###reference_b30###), with intricate intermediate trajectories that cannot be easily compared, our emphasis lies on evaluating the robustness\nof various LLMs during their initial use of the tool and present the results in Table 2 ###reference_###.333The results presented are averages across various scenarios, with specific outcomes for each scenario detailed in Appendix C ###reference_###.\nThe resulting data reveals intriguing observations.\nThe robustness of current LLMs in tool learning presents considerable scope for enhancement.\nWhile human performance remains relatively stable across different environments, the performance of LLMs exhibits significant fluctuations. For instance, when transitioning from Clean-level environment to Union-level, human performance in tool selection only decreases by 2.86 points, whereas the average performance of all LLMs decreases by approximately 20.32 points.\nTo gain a clearer understanding, we employ Welch\u2019s ANOVA Bl (1947 ###reference_b2###) to analyze the significance of LLMs\u2019 performance during the content-filling stage across various environments. As illustrated in Table 3 ###reference_###, our findings underscore the consistency of human performance and the noteworthy disparities in LLMs\u2019 performance across different environments.\nConsequently, enhancing the robustness of LLMs in tool learning is an area that requires significant attention.\n###figure_2### Noise affecting tool names has a more pronounced impact on LLM performance than noise introduced to parameters. \nWe compute the absolute difference in average LLMs performance for each type of noise added to tool names or parameters, relative to their performance in the Clean-level environment, respectively. 
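The comparison just described reduces to a few lines of array arithmetic; in the sketch below, the grouping of scores by noise type and the placeholder numbers are assumptions made purely for illustration.

```python
import numpy as np

# Average stage scores (tool selection, parameter identification, content filling) of the
# evaluated LLMs under each noise type; the numbers are placeholders, not reported results.
avg_scores = {
    "clean":        np.array([65.0, 40.0, 27.0]),
    "slight_tool":  np.array([58.0, 35.0, 22.0]),
    "slight_param": np.array([63.0, 38.0, 25.0]),
    "medium_tool":  np.array([60.0, 36.0, 24.0]),
}

baseline = avg_scores["clean"]
abs_diff = {
    noise: np.abs(scores - baseline)
    for noise, scores in avg_scores.items()
    if noise != "clean"
}
for noise, diff in sorted(abs_diff.items()):
    print(noise, diff)  # larger values indicate a stronger robustness drop relative to Clean
```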
The results depicted in Figure 3 ###reference_### show that tool name noise significantly affects LLMs\u2019 tool learning performance throughout the entire process. In contrast, noise in the parameters has minimal impact on the robustness of LLMs during the tool selection stage and exerts less influence on subsequent stages compared to tool name noise. Notably, LLMs exhibit greater robustness in the Union-level environment than in the Heavy (Tool) environment, underscoring the substantial impact of tool naming on model robustness.\n###figure_3### Offering LLMs interactive examples enhances their tool learning performance, yet it does not bolster their robustness.\nAs tool learning entails multiple turns of interaction between LLMs and external environments, we initially provide the first two turns of interactions for the test cases in each environment to evaluate LLMs\u2019 performance during the third turn of interactions.\nUpon comparing GPT-4\u2019s results in the first and third turns of interactions (Figure 4 ###reference_###), it becomes evident that the provision of two turns of interaction examples leads to a consistent performance boost for GPT-4, resulting in an average performance improvement of 22.91 points across various environments.\nHowever, when examining the performance variation values, it is noteworthy that the standard deviation of its performance across environments increased from 8.14 in the first turn to 12.56 in the third turn. This observation suggests that while its performance improves, its robustness does not see a corresponding enhancement.\n###figure_4###" + }, + { + "section_id": "4.3", + "parent_section_id": "4", + "section_name": "Why do GPT family of models NOT perform well in Slight-level environment?", + "text": "A particularly intriguing finding is that, in contrast to other LLMs, the GPT family of models exhibits a lower performance in Slight-level environment compared to Medium-level, despite the limited validity of the information provided by the latter. Our thorough investigation into the model outputs has revealed that this phenomenon can be attributed to the inherent noise correction capability of the GPT family of models. For instance, when the GPT family of models selects the tool labeled as \u201cpredOict_aTge,\u201d it automatically corrects the noise within it and generates \u201cpredict_age\u201d as the output, consequently leading to an error. 444For more detailed examples, please refer to Appendix D ###reference_###.\nTable 4 ###reference_### illustrates the proportions of total error attributed to noise correction for the tool selection and parameter identification phases of the GPT family of models within the Slight-level environment. Notably, these proportions are exceptionally high, exceeding one-third for GPT-3.5-turbo. Consequently, addressing the challenge of mitigating capability degradation stemming from the model\u2019s inherent characteristics remains a pressing research concern." + }, + { + "section_id": "5", + "parent_section_id": null, + "section_name": "RoTTuning", + "text": "It is evident that enhancing the robustness of LLMs in tool learning is imperative. To tackle this issue, we introduce RoTTuning, a novel approach aimed at bolstering the robustness of LLMs through increased environmental diversity." 
+ }, + { + "section_id": "5.1", + "parent_section_id": "5", + "section_name": "Method", + "text": "RoTTuning encompasses four phases: query expansion, trajectory generation, environment augmentation, and generalizability training (Figure 5 ###reference_###).\nTo efficiently generate high-quality user queries on a large scale, we employ the self-instruct Wang et al. (2023b ###reference_b26###) technique, drawing from the 105 existing user queries.555The specific prompt can be found in Appendix G ###reference_###. Specifically, we instruct GPT-4 to create seven fresh user queries within the context of a subset of tools, accompanied by three existing user queries and two model-generated queries. To ensure diversity in our dataset, we scrutinize the new data for redundancy in relation to each provided example and eliminate queries with Rouge-L values surpassing 0.55. This process yields a total of 4,077 new user queries.\nUpon obtaining high-quality user queries, we employ GPT-4 to produce tool learning trajectories. To ensure the accuracy of the generated trajectories, we leverage the specifically designed function call feature of GPT-4. Simultaneously, we guide GPT-4 in generating the associated thought process by incorporating a system prompt.666The specific prompt can be found in Appendix H ###reference_###. Furthermore, we specify that GPT-4\u2019s tool usage is limited to a maximum of nine turns. By considering each turn of interaction as a distinct data point, this process results in a total of 12,247 pieces of training data.\nTo enhance the variety of environments,\nwe modify the trajectories generated in the Clean-level environment to align with the characteristics of noisy environments. This strategy ensures data quality while addressing the challenges of working in noisy settings.\nTo mitigate the potential drawbacks of data coupling, we introduce randomness by augmenting 3000 trajectories for each of the Slight-, Medium-, and Heavy-level environments, along with 1500 trajectories for Union-level environments. When combined with the data from the Clean-level environment, this approach yields a total of 22,747 trajectories, representing a diverse range of environmental conditions.\nUtilizing the diversity trajectories generated, we proceed with the fine-tuning of LLaMA-2-7B-base Touvron et al. (2023b ###reference_b23###)\nand implement a position interpolation Chen et al. (2023a ###reference_b4###) technique to extend its context length to 8096. Based on previous research indicating that fine-tuning with LoRA Hu et al. (2022 ###reference_b10###) achieves superior generalization compared to full parametric fine-tuning Zeng et al. (2023 ###reference_b32###), we opt for the LoRA fine-tuning approach. We conduct 5 epochs of training to derive the ultimate model, RoTLLaMA, which exhibits robust generalization across multiple environments." + }, + { + "section_id": "5.2", + "parent_section_id": "5", + "section_name": "Experimental Results", + "text": "We carry out a series of experimental analyses with RoTLLaMA on RoTBench to verify its advantages when facing various noise environments.777More experiments can be found in Appendix E ###reference_###.\nWe analyze the performance of RoTLLaMA in various environments, and the results are presented in Table 5 ###reference_###. The results reveal that RoTLLaMA\u2019s performance stability across different environments significantly surpasses that of GPT-4. 
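One concrete piece of the query-expansion step described in Section 5.1 is the Rouge-L redundancy filter; a minimal sketch using the rouge-score package is given below. The 0.55 threshold comes from the paper, while the function names and example queries are assumptions.

```python
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)

def is_redundant(candidate: str, reference_queries: list[str], threshold: float = 0.55) -> bool:
    """Return True if the candidate overlaps too strongly with any query it was conditioned on."""
    return any(
        scorer.score(ref, candidate)["rougeL"].fmeasure > threshold
        for ref in reference_queries
    )

# Queries shown in the generation prompt (hypothetical) and newly generated candidates.
prompt_examples = ["What is the weather in Paris today?"]
new_queries = [
    "Tell me today's weather in Paris.",                       # likely discarded as redundant
    "Find a free social event with wheelchair accessibility.",  # likely kept
]

kept = [q for q in new_queries if not is_redundant(q, prompt_examples)]
```

Keeping only non-redundant generations is what preserves diversity in the expanded query set.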
Specifically, in the tool selection phase, the extreme performance difference is only 12.38, whereas GPT-4 demonstrates a much higher extreme difference of 21.90. Furthermore, in the parameter recognition and content filling phases, the extreme performance differences are 16.19 and 14.76, respectively, both of which are smaller than GPT-4\u2019s corresponding values of 20.95 and 20.95.\n###figure_5### To evaluate the effectiveness of various components within our approach, we conducted ablation studies on RoTLLaMA.\nAs shown in Figure 6 ###reference_###, when substituting full-parameter fine-tuning for LoRA fine-tuning (i.e., w/o LoRA), there is a slight decrease in model performance, and standard deviations across environments remain largely unchanged. This suggests that employing LoRA enhances model performance without significantly impacting its robustness.\nOn the other hand, if we omit environment augmentation (i.e., w/o Augmentation), there is a notable decrease in both mean performance and a significant increase in standard deviation within each environment. This underscores the crucial role of environment augmentation in enhancing both model performance and robustness.\nFurthermore, exclusively utilizing full-parameter fine-tuning on the model (i.e., w/o Both) leads to a degradation of 16.10 points in model performance." + }, + { + "section_id": "6", + "parent_section_id": null, + "section_name": "Conclusion", + "text": "In this paper, we introduce RoTBench, a multi-level benchmark for evaluating the robustness of LLMs in tool learning. RoTBench contains five environments, each characterized by varying noise levels, shedding light on the pressing need to bolster the robustness of LLMs. Furthermore, we present RoTTuning, an innovative approach that significantly improves the robustness of LLMs in tool learning by increasing the diversity of environments during the training phase." + } + ], + "appendix": [ + { + "section_id": "Appendix 1", + "parent_section_id": null, + "section_name": "Appendix A Details of LLMs", + "text": "To evaluate the robustness of widely-used LLMs with tool-use capabilities, we opt for testing four open-source models and two closed-source models.\nAmong open-source LLMs, we have chosen four models that have undergone dedicated training for tool learning.\nAmong closed-source LLMs, we have opted for two of the most representative models from the GPT family." + }, + { + "section_id": "Appendix 2", + "parent_section_id": null, + "section_name": "Appendix B Experimental Setup", + "text": "" + }, + { + "section_id": "Appendix 3", + "parent_section_id": null, + "section_name": "Appendix C Results in Different Scenarios", + "text": "We show the performance of each model in different scenarios and document the results from Table 6 ###reference_### to Table 12 ###reference_###.\nFrom the results, we have the following observations." + }, + { + "section_id": "Appendix 4", + "parent_section_id": null, + "section_name": "Appendix D Examples for Noise Correction", + "text": "In Table 13 ###reference_###, we present instances of noise correction observed during the tool selection and parameter identification phases of the GPT family of models." + }, + { + "section_id": "Appendix 5", + "parent_section_id": null, + "section_name": "Appendix E Further Studies about RoTTuning", + "text": "We conduct additional comparative analysis to further validate the effectiveness of RoTTuning in improving the stability of LLMs in noisy environments." 
+ }, + { + "section_id": "Appendix 6", + "parent_section_id": null, + "section_name": "Appendix F Prompt Template for Inference", + "text": "In the context of inference, both the ToolLLaMA-2-7B family of models and the GPT family of models utilize the same prompt (See Table 17 ###reference_###), whereas NexusRaven-13B-v1 and NexusRaven-13B-v2 employ distinct prompts (See Table 18 ###reference_### and Table 19 ###reference_###)." + }, + { + "section_id": "Appendix 7", + "parent_section_id": null, + "section_name": "Appendix G Prompt Template for Query Expansion", + "text": "We use GPT-4 for query expansion based on prompt in Table 20 ###reference_###." + }, + { + "section_id": "Appendix 8", + "parent_section_id": null, + "section_name": "Appendix H Prompt Template for Trajectory Generation", + "text": "We use GPT-4 for trajectory generation based on prompt in Table 21 ###reference_###." + } + ], + "tables": { + "1": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
# Sce# Query# Cat# Subcat# Tool
71054195568
\n
\n
Table 1: Statistics of the data. \u201c# Sce\u201d, \u201c# Query\u201d, \u201c# Cat\u201d, \u201c# Subcat\u201d, and \u201c# Tool\u201d correspond to the count of scenarios, user queries, tool categories, tool subcategories, and individual tools, respectively.
\n
", + "capture": "Table 1: Statistics information of the data. \u201c# Sce\u201d, \u201c# Query\u201d, \u201c# Cat\u201d, \u201c# Subcat\u201d, and \u201c# Tool\u201d correspond to the count of scenarios, user queries, tool categories, tool subcategories, and individual tools, respectively." + }, + "2": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMsHuman
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean66.6770.4855.2473.3375.2480.0088.57
Slight57.6265.7152.8676.1959.0577.1488.57
Medium56.6759.5253.3372.3869.5284.2988.57
Heavy43.3346.6744.2962.3856.1960.0085.71
Union44.7643.8142.8656.1953.3358.1085.71
Parameter Identification
Clean45.7143.8115.2456.1947.6252.3888.57
Slight40.9540.0017.1456.6728.1044.2985.71
Medium38.1035.7114.7650.4844.2953.8182.86
Heavy28.1027.1410.0037.6224.2932.8680.00
Union35.2427.6211.4337.1427.6239.0582.86
Content Filling
Clean28.5725.711.9037.1430.4840.0074.29
Slight24.2923.813.3339.0520.0035.7174.29
Medium22.3820.951.9033.8130.4846.1971.43
Heavy14.2914.760.9530.0016.1925.2468.57
Union16.1916.191.9022.8618.1030.4871.43
\n
\n
Table 2: Performance of various LLMs in different environments, with the best performance in each environment highlighted in bold. \u201cHuman\u201d signifies the average level of human performance.
\n
", + "capture": "Table 2: Performance of various LLMs in different environments, with the best performance in each environment highlighted in bold. \u201cHuman\u201d signifies the average level of human performance." + }, + "3": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n\nSource\n\nModelsF StatisticP Value
\n\nOpen-Source\n\nToolLLaMA-2-7B-v12.47
ToolLLaMA-2-7B-v23.28
NexusRaven-13B-v10.76
NexusRaven-13B-v26.01
\n\nClosed-Source\n\nGPT-3.5-turbo6.76
GPT-45.31
\n\nHuman\n\n\u20130.04
\n
\n
Table 3: Welch\u2019s ANOVA for the content-filling scores across the five environments for various LLMs. A p-value below 0.05 indicates significant differences in the data.
\n
", + "capture": "Table 3: Welch\u2019s ANOVA for across the five enviroments for various LLMs. A p-value below 0.05 indicate significant differences in the data." + }, + "4": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsTool SelectionParameter Identification
GPT-3.5-turbo33.7233.85
GPT-429.1722.83
\n
\n
Table 4: The percentage of error caused by noise correction at different stages in GPT family of models.
\n
", + "capture": "Table 4: The percentage of error caused by noise correction at different stages in GPT family of models." + }, + "5": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
LevelCleanSlightMediumHeavyUnion
76.1972.3870.4865.2463.81
55.2450.0050.4839.0544.76
42.8636.1934.2928.1028.57
\n
\n
Table 5: The scores (%) of RoTLLaMA at the three stages (tool selection, parameter identification, and content filling, from top to bottom) in various environments.
\n
", + "capture": "Table 5: The score in different stages (%) of RoTLLaMA\nin various Environments." + }, + "6": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean60.0073.3320.0053.3386.6786.67
Slight46.6760.0030.0056.6773.3383.33
Medium36.6750.0030.0070.0073.3390.00
Heavy36.6743.3320.0040.0053.3370.00
Union40.0026.6726.6746.6760.0046.67
Parameter Identification
Clean60.0060.006.6740.0060.0073.33
Slight40.0046.6713.3340.0036.6753.33
Medium33.3340.0010.0050.0040.0063.33
Heavy36.6730.006.6713.3323.3340.00
Union40.0013.3313.3340.0026.6733.33
Content Filling
Clean26.6726.676.6733.3360.0073.33
Slight16.6713.3310.0033.3336.6753.33
Medium13.3310.006.6736.6740.0063.33
Heavy16.6713.333.3313.3320.0036.67
Union20.000.006.6733.3326.6733.33
\n
\n
Table 6: Performance of various LLMs in the text generation scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 6: Performance of various LLMs in the text generation scenario, with the best performance in each environment highlighted in bold." + }, + "7": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean80.0080.0080.0080.0086.6786.67
Slight63.3380.0070.0083.3363.3373.33
Medium60.0073.3366.6780.0083.3393.33
Heavy46.6756.6750.0060.0056.6756.67
Union40.0053.3346.6760.0060.0086.67
Parameter Identification
Clean60.0040.0026.6733.3340.0066.67
Slight50.0043.3326.6736.6726.6760.00
Medium50.0046.6716.6730.0040.0066.67
Heavy33.3340.0010.0026.6713.3326.67
Union20.0046.676.6720.0013.3360.00
Content Filling
Clean46.6733.330.0020.0026.6753.33
Slight33.3340.000.0023.3316.6753.33
Medium30.0040.000.0016.6730.0056.67
Heavy13.3320.000.0023.3310.0020.00
Union13.3340.000.0013.336.6746.67
\n
\n
Table 7: Performance of various LLMs in the data understanding scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 7: Performance of various LLMs in the data understanding scenario, with the best performance in each environment highlighted in bold." + }, + "8": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean66.6760.0040.0086.6773.3393.33
Slight60.0050.0036.6780.0060.0080.00
Medium63.3346.6743.3376.6773.3390.00
Heavy46.6736.6736.6773.3346.6756.67
Union53.3346.6726.6766.6760.0073.33
Parameter Identification
Clean60.0046.676.6773.3353.3353.33
Slight53.3343.336.6766.6736.6740.00
Medium46.6740.0010.0060.0053.3353.33
Heavy30.0030.006.6743.3316.6723.33
Union40.0033.336.6740.0033.3340.00
Content Filling
Clean33.3320.000.0033.3320.0033.33
Slight30.0020.000.0030.0020.0030.00
Medium16.6710.000.0026.6730.0040.00
Heavy6.6720.000.0026.6710.0020.00
Union13.3313.330.006.6726.6740.00
\n
\n
Table 8: Performance of various LLMs in the real-time search scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 8: Performance of various LLMs in the real-time search scenario, with the best performance in each environment highlighted in bold." + }, + "9": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean86.6773.3373.3366.6780.0073.33
Slight80.0080.0073.3370.0066.6773.33
Medium83.3380.0073.3366.6780.0086.67
Heavy60.0050.0070.0066.6770.0063.33
Union80.0053.3373.3366.6766.6753.33
Parameter Identification
Clean40.0040.006.6760.0053.3346.67
Slight56.6746.6710.0060.0036.6746.67
Medium53.3346.676.6753.3356.6746.67
Heavy36.6720.0013.3350.0040.0043.33
Union73.3340.0013.3353.3340.0033.33
Content Filling
Clean20.0013.330.0020.0020.0020.00
Slight33.3320.000.0020.0016.6713.33
Medium40.0026.670.0016.6726.6723.33
Heavy20.006.670.0026.6716.6713.33
Union40.0026.670.0013.3320.006.67
\n
\n
Table 9: Performance of various LLMs in the application manipulation scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 9: Performance of various LLMs in the application manipulation scenatio, with the best performance in each environment highlighted in bold." + }, + "10": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean53.3360.0040.0066.6773.3366.67
Slight46.6763.3343.3373.3350.0070.00
Medium50.0053.3350.0063.3360.0073.33
Heavy23.3340.0043.3350.0050.0050.00
Union40.0053.3353.3346.6740.0046.67
Parameter Identification
Clean26.6740.0013.3353.3326.6740.00
Slight30.0026.6713.3353.3310.0026.67
Medium26.6726.6713.3336.6740.0040.00
Heavy6.6716.673.3330.0016.6726.67
Union26.6720.006.6726.6726.6740.00
Content Filling
Clean20.0026.670.0040.0013.3333.33
Slight16.6720.000.0043.3310.0023.33
Medium13.3323.330.0033.3330.0040.00
Heavy6.6710.000.0026.6710.0026.67
Union6.6720.000.0026.676.6726.67
\n
\n
Table 10: Performance of various LLMs in the personal life scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 10: Performance of various LLMs in the personal life scenario, with the best performance in each environment highlighted in bold." + }, + "11": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean60.0080.0073.3373.3346.6773.33
Slight50.0063.3366.6783.3343.3373.33
Medium43.3356.6763.3376.6753.3373.33
Heavy50.0053.3353.3380.0053.3356.67
Union26.6733.3346.6753.3340.0040.00
Parameter Identification
Clean26.6733.3326.6753.3340.0040.00
Slight16.6720.0023.3360.0030.0036.67
Medium16.6716.6730.0060.0043.3350.00
Heavy23.3326.6716.6756.6733.3336.67
Union20.0013.3320.0040.0040.0040.00
Content Filling
Clean20.0026.670.0046.6726.6733.33
Slight13.3316.676.6756.6723.3330.00
Medium16.6713.333.3353.3333.3346.67
Heavy23.3316.673.3353.3326.6730.00
Union13.336.670.0033.3333.3333.33
\n
\n
Table 11: Performance of various LLMs in the information retrieval scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 11: Performance of various LLMs in the information retrieval scenario, with the best performance in each environment highlighted in bold." + }, + "12": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelsOpen-Source LLMsClosed-Source LLMs
ToolLLaMA-2-7B-v1ToolLLaMA-2-7B-v2NexusRaven-13B-v1NexusRaven-13B-v2GPT-3.5-turboGPT-4
Tool Selection
Clean46.6753.3353.3373.3366.6766.67
Slight43.3350.0043.3373.3343.3373.33
Medium46.6743.3340.0066.6750.0070.00
Heavy26.6736.6736.6753.3350.0053.33
Union20.0026.6726.6746.6733.3346.67
Parameter Identification
Clean33.3333.3320.0066.6760.0040.00
Slight26.6740.0023.3366.6720.0046.67
Medium26.6723.3316.6756.6736.6750.00
Heavy16.6716.6713.3333.3326.6723.33
Union13.3313.3313.3333.3313.3326.67
Content Filling
Clean33.3333.336.6760.0046.6733.33
Slight26.6736.676.6760.0016.6746.67
Medium26.6723.333.3346.6723.3346.67
Heavy13.3316.670.0033.3320.0023.33
Union6.676.676.6726.676.6726.67
\n
\n
Table 12: Performance of various LLMs in the financial transactions scenario, with the best performance in each environment highlighted in bold.
\n
", + "capture": "Table 12: Performance of various LLMs in the financial transactions scenario, with the best performance in each environment highlighted in bold." + }, + "13": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
\n\nModels\n\nStageQueryNoisy PartModel Output
\n\nGPT-3.5-turbo\n\n\n\nTool Selection\n\n\n\nI have a list of names: Maria, Juan, and Carlos. Can you predict their ages?\n\n\n\nTool: predOict_aTge\nDescription: Predicts the ages of one or more people given their names.\nParameters: \u2026\n\n\n\nTool: predict_age\n\n
\n\nGPT-3.5-turbo\n\n\n\nParameter Identification\n\n\n\nI want to know what will be the output if we run these commands sequentially in bash: \u2018cd /home/user/documents\u2019, \u2018ls -a.\u2019\n\n\n\nTool: execute_bash_code\nDescription: \u2026\nParameters: Nommands (Required)\nParam Description: The command string to be executed.\n\n\n\nParameters: commands\n\n
\n\nGPT-4\n\n\n\nTool Selection\n\n\n\nIs there any social event available which requires high accessibility and is free of cost?\n\n\n\nTool: get_activty_by_ye\nDescription: Find a random activity with a given type.\nParameters: \u2026\n\n\n\nTool: get_activity_by_type\n\n
\n\nGPT-4\n\n\n\nParameter Identification\n\n\n\nGet me quotes for symbols AAPL, MSFT, and GOOGL from US.\n\n\n\nTool: get_quotes\nDescription: \u2026\nParameters: ymbols (Required)\nParam Description: The value of symbol field returned in auto-complete endpoint. Separated by comma for multiple entities.\n\n\n\nParameters: symbols\n\n
\n
\n
Table 13: Examples for noise correction of GPT family of models.
\n
", + "capture": "Table 13: Examples for noise correction of GPT family of models." + }, + "14": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Approachesw/o Augmentationw/ Aug.Slightw/ Aug.Mediumw/ Aug.Heavyw/ Aug.Union
Tool Selection
Clean74.2970.4872.3875.2471.43
Slight65.2471.9062.3869.0564.29
Medium61.9068.5765.7170.9566.67
Heavy50.4851.9049.5260.4855.24
Union40.0053.3351.4353.3355.24
Parameter Identification
Clean60.9557.1459.0559.0560.95
Slight47.1453.8146.1948.1046.19
Medium42.8651.9048.5748.5752.38
Heavy14.2918.1015.2433.8126.67
Union21.9032.3828.5731.4336.19
Content Filling
Clean45.7143.8148.5744.7642.86
Slight31.9040.0031.9035.2430.95
Medium30.4838.1036.6736.6738.57
Heavy10.4812.8610.48\n24.7619.05
Union12.3819.0517.1421.9027.62
\n
\n
Table 14: Performance of LLMs trained on data augmented from a single environment, compared with the model trained using LoRA without augmentation. The best performance in each environment is highlighted in bold.
\n
", + "capture": "Table 14: Performance of the LLMs trained by data augmented from single environment, compared with the model trained using LoRA without augmentation. The best performance in each environment is highlighted in bold." + }, + "15": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ToolLLaMA-2-NexusRaven-GPT-RoTLLaMA
7B-v17B-v213B-v113B-v23.5-turbo4
53656050233
\n
\n
Table 15: The number of tool hallucinations for each LLM in all environments.
\n
", + "capture": "Table 15: The number of tool hallucinations for each LLM in all environments." + }, + "16": { + "table_html": "
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
LevelCleanSlightMediumHeavyUnion
69.5269.0570.9564.7656.19
52.3845.2450.9540.9539.05
38.1032.3834.7631.4328.57
\n
Table 16: The scores (%) of RoTToolLLaMA at the three stages (tool selection, parameter identification, and content filling, from top to bottom) in various environments.
\n
", + "capture": "Table 16: The score in different stages (%) of RoTToolLLaMA\nin various Environments." + }, + "17": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
System
\n\nYou are an expert in using tools to handle real-time queries from users.\n\n
\n\nFirst I will give you the task description, and your task start.\n\n
\n\nAt each step, your task is to give your thought to analyze the current state, decide the next step, with a function call to actually execute your step.\n\n
\n\nAfter the call, you will get the call result, and you are now in a new state.\n\n
\n\nThen you will analyze your status now, then decide what to do next\u2026\n\n
\n\nAfter many (Thought-call) pairs, you finally perform the task, then you can give your final answer.\n\n
\n\nDesired format:\n\n
\n\nThought: The thought\n\n
\n\nAction: The tool you decide to use\n\n
\n\nAction Input: The parameters for the tool\n\n
\n\nRemember:\n\n
\n\n1. You should ALWAYS think about what to do, but all the thought is short, at most in 3 sentences.\n\n
\n\n2. The action to take should be one of the given tools below.\n\n
\n\n3. The \u201cAction Input\u201d needs to provide a dict similar to {parameter_1: value_1, parameter_2: value_2} to call action.\n\n
\n\n4. Always use the \u201cfinish\u201d tool upon task completion. The final answer should be comprehensive enough for the user. If the task is unmanageable, use the \u201cfinish\u201d tool and respond with \u201cI cannot handle the task.\u201d\n\n
\n\nTask description: You should use tools to help handle the real time user queries. Specifically, you have access of the following tools:\n\n
\n\n{Tool Document}\n\n
\n\nLet\u2019s Begin!\n\n
User
\n\n{Query}\n\n
\n\nBegin!\n\n
\n
\n
Table 17: The prompt used for ToolLLaMA-2-7B family of models and GPT\nfamily of models, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs and \u201c{Query}\u201d represents the query given by the user.
\n
", + "capture": "Table 17: The prompt used for ToolLLaMA-2-7B family of models and GPT\nfamily of models, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs and \u201c{Query}\u201d represents the query given by the user." + }, + "18": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
User
\n\n{Tool Document}\n\n
\n\nUser Query: Question: {Query}\n\n
\n\nPlease pick a function from the above options that best answers the user query and fill in the appropriate arguments.\n\n
\n
\n
Table 18: The prompt used for NexusRaven-13B-v1, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs\nand \u201c{Query}\u201d represents the query given by the user.
\n
", + "capture": "Table 18: The prompt used for NexusRaven-13B-v1, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs\nand \u201c{Query}\u201d represents the query given by the user." + }, + "19": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
User
\n\n{Tool Document}\n\n
\n\nUser Query: {Query}\n\n
\n
\n
Table 19: The prompt used for NexusRaven-13B-v2, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs\nand \u201c{Query}\u201d represents the query given by the user.
\n
", + "capture": "Table 19: The prompt used for NexusRaven-13B-v2, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs\nand \u201c{Query}\u201d represents the query given by the user." + }, + "20": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
System
\n\nAs an expert, your assignment is to utilize the comprehensive documentation of various tools to develop a series of problem scenarios that these tools can resolve. Ideally, each scenario should necessitate the sequential use of multiple tools for its resolution.\n\n
\n\nRemember:\n\n
\n\n1. The tools employed to address a problem should be a subset of the tools detailed in the provided documentation; ideally, each problem should require the use of more than one tool.\n\n
\n\n2. The parameter values needed by each tool can either be directly extracted from the query or obtained by invoking the specified other tool.\n\n
\n\n3. The problem scenario should be expressed in a way that is understandable to humans, while also showcasing the diverse functions of the provided tools and their interrelationships.\n\n
\n\nHere is the documentation of various tools: {Tool Document}\n\n
User
\n\nPlease generate 12 diverse queries according to the documentation.\n\n
\n\nExamples:\n\n
\n\n{Examples}\n\n
\n
\n
Table 20: The prompt for query expansion, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs and \u201c{Examples}\u201d represents the examples for LLMs.
\n
", + "capture": "Table 20: The prompt for query expansion, where \u201c{Tool Document}\u201d represents the tool documentation given to LLMs and \u201c{Examples}\u201d represents the examples for LLMs." + }, + "21": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
System
\n\nYou are an expert in using tools to handle real-time queries from users.\n\n
\n\nAt each step, your task is to give your thought to analyze the current state, decide the next step, with a function call to actually execute your step.\n\n
\n\nAfter the call, you will get the call result, and you are now in a new state.\n\n
\n\nThen you will analyze your status now, then decide what to do next\u2026\n\n
\n\nAfter a series of these thought-action pairs, you will complete the task and provide the final answer.\n\n
\n\nRemember:\n\n
\n\n1. You must ALWAYS select a specific function to execute your idea at each step.\n\n
\n\n2. Before calling any function, you should ALWAYS give your thought, but limit it to a maximum of three sentences.\n\n
\n\n3. ALWAYS use the \u201cfinish\u201d tool upon task completion. The final answer should be comprehensive enough for the user. If the task is unmanageable, use the \u201cfinish\u201d tool and respond with \u201cI cannot handle the task\u201d.\n\n
\n\nLet\u2019s begin!\n\n
User
\n\n{Query}\n\n
\n\nBegin!\n\n
\n
\n
Table 21: The prompt for trajectory generation, where \u201c{Query}\u201d represents the query given by the user.
\n
", + "capture": "Table 21: The prompt for trajectory generation, where \u201c{Query}\u201d represents the query given by the user." + } + }, + "image_paths": { + "1": { + "figure_path": "2401.08326v3_figure_1.png", + "caption": "Figure 1: Example of noise affecting tool selection for LLMs. Although the functionality of the tool remains unaffected by its name, renaming \u201cGet_Weather\u201d as \u201cABC\u201d impedes LLMs from utilizing the tool properly.", + "url": "http://arxiv.org/html/2401.08326v3/x1.png" + }, + "2": { + "figure_path": "2401.08326v3_figure_2.png", + "caption": "Figure 2: The framework of RoTBench. RoTBench encompasses five environments (i.e., Clean, Slight, Medium, Heavy, and Union), each introduces various noise to the tool and parameters, facilitating a thorough evaluation of the robustness performance of LLMs throughout the three stages of tool usage (i.e., tool selection, parameter identification, and content filling).", + "url": "http://arxiv.org/html/2401.08326v3/x2.png" + }, + "3": { + "figure_path": "2401.08326v3_figure_3.png", + "caption": "Figure 3: Absolute difference between the average performance of LLMs in various noisy environments and their average performance in Clean-level environment.", + "url": "http://arxiv.org/html/2401.08326v3/x3.png" + }, + "4": { + "figure_path": "2401.08326v3_figure_4.png", + "caption": "Figure 4: The performance of GPT-4 during the content filling phase in the first and third rounds of interaction.", + "url": "http://arxiv.org/html/2401.08326v3/x4.png" + }, + "5": { + "figure_path": "2401.08326v3_figure_5.png", + "caption": "Figure 5: Illustration of RoTTuning. RoTTuning encompasses four phases, aiming at bolstering the robustness of LLMs in tool learning through increased environmental diversity.", + "url": "http://arxiv.org/html/2401.08326v3/x5.png" + }, + "6": { + "figure_path": "2401.08326v3_figure_6.png", + "caption": "Figure 6: The means and standard deviations of our model\u2019s performance in the five environments.", + "url": "http://arxiv.org/html/2401.08326v3/x6.png" + } + }, + "validation": true, + "references": [ + { + "1": { + "title": "Constitutional AI: harmlessness from AI feedback.", + "author": "Yuntao Bai, Saurav Kadavath, Sandipan Kundu, Amanda Askell, Jackson Kernion, Andy Jones, Anna Chen, Anna Goldie, Azalia Mirhoseini, Cameron McKinnon, Carol Chen, Catherine Olsson, Christopher Olah, Danny Hernandez, Dawn Drain, Deep Ganguli, Dustin Li, Eli Tran-Johnson, Ethan Perez, Jamie Kerr, Jared Mueller, Jeffrey Ladish, Joshua Landau, Kamal Ndousse, Kamile Lukosiute, Liane Lovitt, Michael Sellitto, Nelson Elhage, Nicholas Schiefer, Noem\u00ed Mercado, Nova DasSarma, Robert Lasenby, Robin Larson, Sam Ringer, Scott Johnston, Shauna Kravec, Sheer El Showk, Stanislav Fort, Tamera Lanham, Timothy Telleen-Lawton, Tom Conerly, Tom Henighan, Tristan Hume, Samuel R. Bowman, Zac Hatfield-Dodds, Ben Mann, Dario Amodei, Nicholas Joseph, Sam McCandlish, Tom Brown, and Jared Kaplan. 2022.", + "venue": "CoRR, abs/2212.08073.", + "url": "https://doi.org/10.48550/ARXIV.2212.08073" + } + }, + { + "2": { + "title": "The generalisation of student\u2019s problems when several different population variances are involved.", + "author": "Welch Bl. 1947.", + "venue": "Biometrika, 34(1-2):28\u201335.", + "url": "http://pds9.egloos.com/pds/200804/26/44/2332510.pdf" + } + }, + { + "3": { + "title": "Language models are few-shot learners.", + "author": "Tom B. 
Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020.", + "venue": "In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual.", + "url": "https://proceedings.neurips.cc/paper/2020/hash/1457c0d6bfcb4967418bfb8ac142f64a-Abstract.html" + } + }, + { + "4": { + "title": "Extending context window of large language models via positional interpolation.", + "author": "Shouyuan Chen, Sherman Wong, Liangjian Chen, and Yuandong Tian. 2023a.", + "venue": "CoRR, abs/2306.15595.", + "url": "https://doi.org/10.48550/ARXIV.2306.15595" + } + }, + { + "5": { + "title": "Improving the robustness of summarization systems with dual augmentation.", + "author": "Xiuying Chen, Guodong Long, Chongyang Tao, Mingzhe Li, Xin Gao, Chengqi Zhang, and Xiangliang Zhang. 2023b.", + "venue": "In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 6846\u20136857. Association for Computational Linguistics.", + "url": "https://doi.org/10.18653/V1/2023.ACL-LONG.378" + } + }, + { + "6": { + "title": "How robust is GPT-3.5 to predecessors? A comprehensive study on language understanding tasks.", + "author": "Xuanting Chen, Junjie Ye, Can Zu, Nuo Xu, Rui Zheng, Minlong Peng, Jie Zhou, Tao Gui, Qi Zhang, and Xuanjing Huang. 2023c.", + "venue": "CoRR, abs/2303.00293.", + "url": "https://doi.org/10.48550/ARXIV.2303.00293" + } + }, + { + "7": { + "title": "T-eval: Evaluating the tool utilization capability step by step.", + "author": "Zehui Chen, Weihua Du, Wenwei Zhang, Kuikun Liu, Jiangning Liu, Miao Zheng, Jingming Zhuo, Songyang Zhang, Dahua Lin, Kai Chen, and Feng Zhao. 2023d.", + "venue": null, + "url": "http://arxiv.org/abs/2312.14033" + } + }, + { + "8": { + "title": "Demonsf: A multi-task demonstration-based generative framework for noisy slot filling task.", + "author": "Guanting Dong, Tingfeng Hui, Zhuoma Gongque, Jinxu Zhao, Daichi Guo, Gang Zhao, Keqing He, and Weiran Xu. 2023.", + "venue": "In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 10506\u201310518. Association for Computational Linguistics.", + "url": "https://doi.org/10.18653/V1/2023.FINDINGS-EMNLP.705" + } + }, + { + "9": { + "title": "Toolkengpt: Augmenting frozen language models with massive tools via tool embeddings.", + "author": "Shibo Hao, Tianyang Liu, Zhen Wang, and Zhiting Hu. 2023.", + "venue": "CoRR, abs/2305.11554.", + "url": "https://doi.org/10.48550/ARXIV.2305.11554" + } + }, + { + "10": { + "title": "Lora: Low-rank adaptation of large language models.", + "author": "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022.", + "venue": "In The Tenth International Conference on Learning Representations, ICLR 2022, Virtual Event, April 25-29, 2022. 
OpenReview.net.", + "url": "https://openreview.net/forum?id=nZeVKeeFYf9" + } + }, + { + "11": { + "title": "Metatool benchmark for large language models: Deciding whether to use tools and which to use.", + "author": "Yue Huang, Jiawen Shi, Yuan Li, Chenrui Fan, Siyuan Wu, Qihui Zhang, Yixin Liu, Pan Zhou, Yao Wan, Neil Zhenqiang Gong, and Lichao Sun. 2023.", + "venue": "CoRR, abs/2310.03128.", + "url": "https://doi.org/10.48550/ARXIV.2310.03128" + } + }, + { + "12": { + "title": "Towards robust and safe reinforcement learning with benign off-policy data.", + "author": "Zuxin Liu, Zijian Guo, Zhepeng Cen, Huan Zhang, Yihang Yao, Hanjiang Hu, and Ding Zhao. 2023.", + "venue": "In International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pages 21586\u201321610. PMLR.", + "url": "https://proceedings.mlr.press/v202/liu23l.html" + } + }, + { + "13": { + "title": "Augmented language models: a survey.", + "author": "Gr\u00e9goire Mialon, Roberto Dess\u00ec, Maria Lomeli, Christoforos Nalmpantis, Ramakanth Pasunuru, Roberta Raileanu, Baptiste Rozi\u00e8re, Timo Schick, Jane Dwivedi-Yu, Asli Celikyilmaz, Edouard Grave, Yann LeCun, and Thomas Scialom. 2023.", + "venue": "CoRR, abs/2302.07842.", + "url": "https://doi.org/10.48550/ARXIV.2302.07842" + } + }, + { + "14": { + "title": "GPT-4 technical report.", + "author": "OpenAI. 2023.", + "venue": "CoRR, abs/2303.08774.", + "url": "https://doi.org/10.48550/ARXIV.2303.08774" + } + }, + { + "15": { + "title": "Tool learning with foundation models.", + "author": "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, Yi Ren Fung, Yusheng Su, Huadong Wang, Cheng Qian, Runchu Tian, Kunlun Zhu, Shihao Liang, Xingyu Shen, Bokai Xu, Zhen Zhang, Yining Ye, Bowen Li, Ziwei Tang, Jing Yi, Yuzhang Zhu, Zhenning Dai, Lan Yan, Xin Cong, Yaxi Lu, Weilin Zhao, Yuxiang Huang, Junxi Yan, Xu Han, Xian Sun, Dahai Li, Jason Phang, Cheng Yang, Tongshuang Wu, Heng Ji, Zhiyuan Liu, and Maosong Sun. 2023a.", + "venue": "CoRR, abs/2304.08354.", + "url": "https://doi.org/10.48550/ARXIV.2304.08354" + } + }, + { + "16": { + "title": "Toolllm: Facilitating large language models to master 16000+ real-world apis.", + "author": "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Runchu Tian, Ruobing Xie, Jie Zhou, Mark Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2023b.", + "venue": "CoRR, abs/2307.16789.", + "url": "https://doi.org/10.48550/ARXIV.2307.16789" + } + }, + { + "17": { + "title": "Code llama: Open foundation models for code.", + "author": "Baptiste Rozi\u00e8re, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, J\u00e9r\u00e9my Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton-Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre D\u00e9fossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, and Gabriel Synnaeve. 2023.", + "venue": "CoRR, abs/2308.12950.", + "url": "https://doi.org/10.48550/ARXIV.2308.12950" + } + }, + { + "18": { + "title": "Toolformer: Language models can teach themselves to use tools.", + "author": "Timo Schick, Jane Dwivedi-Yu, Roberto Dess\u00ec, Roberta Raileanu, Maria Lomeli, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 
2023.", + "venue": "CoRR, abs/2302.04761.", + "url": "https://doi.org/10.48550/ARXIV.2302.04761" + } + }, + { + "19": { + "title": "Toolalpaca: Generalized tool learning for language models with 3000 simulated cases.", + "author": "Qiaoyu Tang, Ziliang Deng, Hongyu Lin, Xianpei Han, Qiao Liang, and Le Sun. 2023.", + "venue": "CoRR, abs/2306.05301.", + "url": "https://doi.org/10.48550/ARXIV.2306.05301" + } + }, + { + "20": { + "title": "Nexusraven: Surpassing the state-of-the-art in open-source function calling llms.", + "author": "Nexusflow.ai team. 2023a.", + "venue": null, + "url": "http://nexusflow.ai/blog" + } + }, + { + "21": { + "title": "Nexusraven-v2: Surpassing gpt-4 for zero-shot function calling.", + "author": "Nexusflow.ai team. 2023b.", + "venue": null, + "url": "https://nexusflow.ai/blogs/ravenv2" + } + }, + { + "22": { + "title": "Llama: Open and efficient foundation language models.", + "author": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, Aur\u00e9lien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a.", + "venue": "CoRR, abs/2302.13971.", + "url": "https://doi.org/10.48550/ARXIV.2302.13971" + } + }, + { + "23": { + "title": "Llama 2: Open foundation and fine-tuned chat models.", + "author": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton-Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aur\u00e9lien Rodriguez, Robert Stojnic, Sergey Edunov,\nand Thomas Scialom. 2023b.", + "venue": "CoRR, abs/2307.09288.", + "url": "https://doi.org/10.48550/ARXIV.2307.09288" + } + }, + { + "24": { + "title": "On the robustness of chatgpt: An adversarial and out-of-distribution perspective.", + "author": "Jindong Wang, Xixu Hu, Wenxin Hou, Hao Chen, Runkai Zheng, Yidong Wang, Linyi Yang, Haojun Huang, Wei Ye, Xiubo Geng, Binxing Jiao, Yue Zhang, and Xing Xie. 2023a.", + "venue": "CoRR, abs/2302.12095.", + "url": "https://doi.org/10.48550/ARXIV.2302.12095" + } + }, + { + "25": { + "title": "Textflint: Unified multilingual robustness evaluation toolkit for natural language processing.", + "author": "Xiao Wang, Qin Liu, Tao Gui, Qi Zhang, Yicheng Zou, Xin Zhou, Jiacheng Ye, Yongxin Zhang, Rui Zheng, Zexiong Pang, Qinzhuo Wu, Zhengyan Li, Chong Zhang, Ruotian Ma, Zichu Fei, Ruijian Cai, Jun Zhao, Xingwu Hu, Zhiheng Yan, Yiding Tan, Yuan Hu, Qiyuan Bian, Zhihua Liu, Shan Qin, Bolin Zhu, Xiaoyu Xing, Jinlan Fu, Yue Zhang, Minlong Peng, Xiaoqing Zheng, Yaqian Zhou, Zhongyu Wei, Xipeng Qiu, and Xuanjing Huang. 
2021.", + "venue": "In Proceedings of the Joint Conference of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL 2021 - System Demonstrations, Online, August 1-6, 2021, pages 347\u2013355. Association for Computational Linguistics.", + "url": "https://doi.org/10.18653/V1/2021.ACL-DEMO.41" + } + }, + { + "26": { + "title": "Self-instruct: Aligning language models with self-generated instructions.", + "author": "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. 2023b.", + "venue": "In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 13484\u201313508. Association for Computational Linguistics.", + "url": "https://doi.org/10.18653/V1/2023.ACL-LONG.754" + } + }, + { + "27": { + "title": "Foundation models for decision making: Problems, methods, and opportunities.", + "author": "Sherry Yang, Ofir Nachum, Yilun Du, Jason Wei, Pieter Abbeel, and Dale Schuurmans. 2023.", + "venue": "CoRR, abs/2303.04129.", + "url": "https://doi.org/10.48550/ARXIV.2303.04129" + } + }, + { + "28": { + "title": "React: Synergizing reasoning and acting in language models.", + "author": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. 2023.", + "venue": "In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net.", + "url": "https://openreview.net/pdf?id=WE_vluYUL-X" + } + }, + { + "29": { + "title": "A comprehensive capability analysis of GPT-3 and GPT-3.5 series models.", + "author": "Junjie Ye, Xuanting Chen, Nuo Xu, Can Zu, Zekai Shao, Shichun Liu, Yuhan Cui, Zeyang Zhou, Chao Gong, Yang Shen, Jie Zhou, Siming Chen, Tao Gui, Qi Zhang, and Xuanjing Huang. 2023.", + "venue": "CoRR, abs/2303.10420.", + "url": "https://doi.org/10.48550/ARXIV.2303.10420" + } + }, + { + "30": { + "title": "Tooleyes: Fine-grained evaluation for tool learning capabilities of large language models in real-world scenarios.", + "author": "Junjie Ye, Guanyu Li, Songyang Gao, Caishuang Huang, Yilong Wu, Sixian Li, Xiaoran Fan, Shihan Dou, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024a.", + "venue": "CoRR, abs/2401.00741.", + "url": "https://doi.org/10.48550/ARXIV.2401.00741" + } + }, + { + "31": { + "title": "Toolsword: Unveiling safety issues of large language models in tool learning across three stages.", + "author": "Junjie Ye, Sixian Li, Guanyu Li, Caishuang Huang, Songyang Gao, Yilong Wu, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024b.", + "venue": "In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 2181\u20132211. Association for Computational Linguistics.", + "url": "https://aclanthology.org/2024.acl-long.119" + } + }, + { + "32": { + "title": "Agenttuning: Enabling generalized agent abilities for llms.", + "author": "Aohan Zeng, Mingdao Liu, Rui Lu, Bowen Wang, Xiao Liu, Yuxiao Dong, and Jie Tang. 
2023.", + "venue": "ArXiv, abs/2310.12823.", + "url": "https://api.semanticscholar.org/CorpusID:264306101" + } + }, + { + "33": { + "title": "Promptbench: Towards evaluating the robustness of large language models on adversarial prompts.", + "author": "Kaijie Zhu, Jindong Wang, Jiaheng Zhou, Zichen Wang, Hao Chen, Yidong Wang, Linyi Yang, Weirong Ye, Neil Zhenqiang Gong, Yue Zhang, and Xingxu Xie. 2023.", + "venue": "ArXiv, abs/2306.04528.", + "url": "https://api.semanticscholar.org/CorpusID:259095572" + } + }, + { + "34": { + "title": "Toolqa: A dataset for LLM question answering with external tools.", + "author": "Yuchen Zhuang, Yue Yu, Kuan Wang, Haotian Sun, and Chao Zhang. 2023.", + "venue": "CoRR, abs/2306.13304.", + "url": "https://doi.org/10.48550/ARXIV.2306.13304" + } + } + ], + "url": "http://arxiv.org/html/2401.08326v3" +} \ No newline at end of file