diff --git "a/20240921/2409.13980v1.json" "b/20240921/2409.13980v1.json" new file mode 100644--- /dev/null +++ "b/20240921/2409.13980v1.json" @@ -0,0 +1,659 @@ +{ + "title": "Enhancing Advanced Visual Reasoning Ability of Large Language Models", + "abstract": "Recent advancements in Vision-Language (VL) research have sparked new benchmarks for complex visual reasoning, challenging models\u2019 advanced reasoning ability. Traditional Vision-Language Models (VLMs) perform well in visual perception tasks while struggling with complex reasoning scenarios. Conversely, Large Language Models (LLMs) demonstrate robust text reasoning capabilities; however, they lack visual acuity. To bridge this gap, we propose Complex Visual Reasoning Large Language Models (CVR-LLM), capitalizing on VLMs\u2019 visual perception proficiency and LLMs\u2019 extensive reasoning capability. Unlike recent multimodal large language models (MLLMs) that require a projection layer, our approach transforms images into detailed, context-aware descriptions using an iterative self-refinement loop and leverages LLMs\u2019 text knowledge for accurate predictions without extra training. We also introduce a novel multi-modal in-context learning (ICL) methodology to enhance LLMs\u2019 contextual understanding and reasoning. Additionally, we introduce Chain-of-Comparison (CoC), a step-by-step comparison technique enabling contrasting various aspects of predictions. Our CVR-LLM presents the first comprehensive study across a wide array of complex visual reasoning tasks and achieves SOTA performance among all.", + "sections": [ + { + "section_id": "1", + "parent_section_id": null, + "section_name": "Introduction", + "text": "The concept of complex visual reasoning was introduced with Visual Commonsense Reasoning (VCR) dataset Zellers et al. (2019 ###reference_b49###) in 2019, which tests models\u2019 ability to understand visual content as well as commonsense cognition. However, the development in this field has remained relatively subdued, primarily due to Vision-Language Models\u2019 (VLMs) limitations in incorporating commonsense knowledge Gan et al. (2022 ###reference_b15###). Recent years have seen significant advancements in complex linguistic reasoning tasks Cobbe et al. (2021 ###reference_b12###); Wei et al. (2022 ###reference_b45###) due to the emerging GPT3 Brown et al. (2020 ###reference_b5###), LLaMA Touvron et al. (2023a ###reference_b42###), and Vicuna Chiang et al. (2023 ###reference_b11###). This leap forward has triggered a renewed interest in the complex visual reasoning area, exploring how visual perception can enhance linguistic inference and potentially overcome previous hurdles Gan et al. (2022 ###reference_b15###). It has led to innovative benchmarks focusing on various aspects: commonsense reasoning - WinoGAViL Bitton et al. (2022 ###reference_b3###), compositionality - Winoground Thrush et al. (2022 ###reference_b41###), weird image explanation - Whoops Bitton-Guetta et al. (2023 ###reference_b4###), and humor understanding - NYCCC Hessel et al. (2022 ###reference_b16###). These tasks demand models not only accurately interpret image content, but also integrate knowledge from daily experiences, general commonsense, cultural context, and humor sense. 
For example, a synthetic image, as shown in Whoop\u2019s example in Figure 1 ###reference_### of \u201cThe portrait of the Mona Lisa depicts a stern male face.\u201d contradicts the cultural context, as the famous painting Mona Lisa depicts a female face.\n###figure_1### In this paper, we introduce a novel method named Complex Visual Reasoning Large Language Models (CVR-LLM), based on the \"VLMs + LLMs\" concept. Recent multimodal large language models (MLLMs) like LLaVA Liu et al. (2024 ###reference_b25###, 2023a ###reference_b24###) and MiniGPT4 Zhu et al. (2023 ###reference_b55###); Chen et al. (2023 ###reference_b7###) have proven effective in many VL tasks. However, these models are resource-intensive, relying on millions of image-text pairs for projection layer learning. To overcome this limitation, our approach leverages the visual perception strengths of VLMs to translate images into context-aware image descriptions (CaID) via an inference-only, dual-loop self-refinement process that incorporates feedback from LLMs. These detailed descriptions enhance the LLMs\u2019 inference process, transforming multi-modal tasks into simpler single-modal challenges and streamlining the overall process. In addition, we develop a unique multi-modal in-context learning (ICL) approach named Complex Visual Reasoning ICL (CVR-ICL), which enhances the reasoning capacities of LLMs within a range of complex multi-modal environments. Figure 2 ###reference_### provides an illustration of how our CVR-LLM is applied to the Winoground task. It describes the images as appropriate sentences via CaID and utilizes the sophisticated reasoning and ICL abilities of LLMs through CVR-ICL for more accurate predictions.\n###figure_2### Our research stands as the pioneering study to explore such a broad array of benchmarks (WinoGAViL, Winoground, Whoops, VCR, and NYCCC), proposing a paradigm centred on the \"VLM+LLM\" concept for addressing complex visual reasoning tasks. Experimental results show that CVR-LLM achieves SOTA performance across all five tasks. Further ablation studies and comparative analyses reveal the effectiveness of each module and the superiority of our method over previous approaches. Particularly in comparative analysis, we introduce the Chain-of-Comparison (CoC) technique, inspired by \"Chain-of-Thought\" and utilizing GPT4 Achiam et al. (2023 ###reference_b1###), to address the limitations of conventional metrics in evaluating abstract concepts. CoC provides a nuanced analysis by systematically dissecting and quantitatively contrasting various facets of the results for a comprehensive evaluation.\nOur contributions are summarized as follows: (1) We present the first comprehensive study across all complex visual reasoning tasks, including WinoGAViL, Winoground, Whoops, VCR, and NYCCC. (2) We design a context-aware image description generation method and a specific in-context learning strategy111The project is available at: https://CVR-LLM.github.io, to enhance the advanced visual reasoning ability of LLMs to multi-modal complex visual reasoning tasks. (3) We further introduce Chain-of-Comparsion, a novel GPT4-based comparison technique inspired by \"Chain-of-Thought\" filling the gaps of traditional metrics in abstract concept evaluation. (4) Experimental results show that our approach surpasses current SOTA models in a range of complex visual reasoning scenarios." 
+ }, + { + "section_id": "2", + "parent_section_id": null, + "section_name": "Related Work", + "text": "" + }, + { + "section_id": "2.1", + "parent_section_id": "2", + "section_name": "Reasoning Research in Vision-Language Domain", + "text": "In recent years, multi-modal reasoning research has significantly advanced. Beyond the complex visual reasoning benchmarks discussed in Section 1 ###reference_###, many studies focus on the reasoning process itself, such as chain-of-thought Kojima et al. (2022 ###reference_b20###); Shaikh et al. (2022 ###reference_b39###) or reasoning modules Zhou et al. (2023b ###reference_b54###); Jiang et al. (2023 ###reference_b18###), which are crucial for enhancing AI models\u2019 analytical capabilities and performance. For instance, Liu et al. (2023b ###reference_b27###) introduced a modality-aligned thought chain reasoning framework to incorporate explicit reasoning into task-oriented dialogue generation, improving contextual understanding and effectiveness. Lv et al. (2023 ###reference_b30###) proposed a counterfactual cross-modality reasoning method for better video moment localization. Zhou et al. (2023a ###reference_b53###) developed a multi-step reasoning probability transfer mechanism to improve multi-label interaction classifications. Yu et al. (2023 ###reference_b48###) presented a hierarchical reasoning network to consolidate multi-level interactive cues, from coarse to fine-grained details, enhancing Human-Object Interaction (HOI) representations." + }, + { + "section_id": "2.2", + "parent_section_id": "2", + "section_name": "Large Language Models for Vision-Language Analysis", + "text": "The past two years have seen an unprecedented surge in the development and application of LLMs Brown et al. (2020 ###reference_b5###); Touvron et al. (2023a ###reference_b42###); Chiang et al. (2023 ###reference_b11###) across diverse fields. LLMs have garnered acclaim for their robust capabilities, including advanced analytical prowess Kojima et al. (2022 ###reference_b20###), extensive text-level knowledge Naveed et al. (2023 ###reference_b33###) and superior understanding ability Chang et al. (2023 ###reference_b6###). Furthermore, they are equipped with two powerful mechanisms: chain-of-thought Kojima et al. (2022 ###reference_b20###) and in-context learning Liu et al. (2021a ###reference_b26###), which significantly augment their effectiveness and performance in specialized tasks Naveed et al. (2023 ###reference_b33###). For example, Muraoka et al. (2023 ###reference_b32###) developed a cross-lingual model trained alongside a cross-lingual LLM, leveraging LLMs\u2019 capabilities across languages. Lan et al. (2023 ###reference_b21###) proposed reasoning question prompts for Visual Question Answering (VQA) tasks, unlocking LLMs\u2019 potential in zero-shot learning. Additionally, Yang et al. (2023 ###reference_b47###) introduced SODA, a system that integrates LLMs with explainable AI to assist marketers with data interpretation, enhancing human-AI collaboration. Zhong et al. (2023 ###reference_b52###) used knowledge distillation to imbue the SUR-adapter with LLMs\u2019 semantic understanding and reasoning capabilities." + }, + { + "section_id": "3", + "parent_section_id": null, + "section_name": "Methods", + "text": "In this section, we introduce the CVR-LLM framework, highlighting its innovative process for generating context-aware image descriptions (CaID) as well as its complex visual reasoning in-context learning (CVR-ICL) strategy. 
Initially, we explain the CaID generation process, which differs from traditional image captioning by using a self-refinement loop with feedback from Large Language Models (LLMs) to produce accurate and contextually relevant descriptions (Section 3.1 ###reference_###). Subsequently, we present the CVR-ICL approach (Section 3.2 ###reference_###), which enhances LLMs\u2019 contextual understanding and reasoning by assessing relevant cases and selecting suitable complex multi-modal demonstrations." + }, + { + "section_id": "3.1", + "parent_section_id": "3", + "section_name": "Context-Aware Image Description", + "text": "Pre-trained VLMs Li et al. (2023 ###reference_b22###); Alayrac et al. (2022 ###reference_b2###) have demonstrated their proficiency in generating detailed image captions on benchmarks such as MSCOCO Chen et al. (2015 ###reference_b9###). However, while these captions may accurately reflect visual content, they are not customized for complex visual reasoning scenarios. Recently, the trend of multi-modal instruction-following agents like miniGPT4 Zhu et al. (2023 ###reference_b55###); Chen et al. (2023 ###reference_b7###) and LLaVA Liu et al. (2024 ###reference_b25###, 2023a ###reference_b24###), integrating open-source LLMs Chiang et al. (2023 ###reference_b11###); Touvron et al. (2023b ###reference_b43###) with pre-trained vision encoders Dosovitskiy et al. (2020 ###reference_b13###); Liu et al. (2021b ###reference_b28###) to create a MLLM, has become very popular. The effectiveness of these models is heavily reliant on tuning with vast amounts of VL instruction data, which is generated by powerful LLMs like ChatGPT OpenAI (2022 ###reference_b34###) and GPT4 Achiam et al. (2023 ###reference_b1###). While promising, their reliance on extensive VL instruction data for tuning requires the substantial resource and time investment. In this work, we introduce a more efficient method for generating context-aware image descriptions, which depends on the inference process and leverages task-specific information and feedback from LLMs to craft better prompts, guiding the caption generation process more effectively.\nOur CaID framework optimizes the process of creating context-aware image descriptions through a dual-loop self-refinement approach, as shown in Figure 3 ###reference_###. Initially, it leverages task-specific details and LLM insights to craft precise image prompts. These initial prompts are designed to distill essential task-related information, guiding the captioner in producing descriptions that not only cover image content but are also deeply aligned with the task\u2019s requirements. Specifically, given a task specific text description with an image (for processes involving multiple images, we approach each image sequentially), the generation of initial context-aware image descriptions can be described as follows:\nwhere is the initial generated context-aware image description. C is the image-to-text captioner, transfering the image into the description. L is the LLM, encapsulating crucial task-related text information (e.g. requirements, questions, cue words) into feature prompts.\n###figure_3### In the second loop, our approach is crafted to encapsulate essential task-related details as well as LLMs\u2019 feedback, enhancing description generation with LLMs\u2019 vast knowledge. Specifically, it merges initial descriptions with task specifics and CVR-ICL examples into a task-focused prompt, guiding LLMs to make more precise predictions. 
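To make the pipeline described so far concrete, the sketch below covers the first loop (the initial context-aware description) and the task-focused prompt that yields the LLM prediction; the feedback refinement discussed next simply revisits the same two calls. This is a minimal illustration rather than our released code: the `LLM` and `Captioner` callables stand in for the LLM API and the BLIP-2 captioner and are assumed interfaces.

```python
from typing import Callable, List

# Assumed interfaces (not part of our release): an instruction-following LLM and
# a prompt-conditioned image captioner such as BLIP2-flant5xxl.
LLM = Callable[[str], str]             # prompt text -> model response
Captioner = Callable[[str, str], str]  # (image_path, image_prompt) -> caption

def initial_description(image_path: str, task_text: str,
                        llm: LLM, captioner: Captioner) -> str:
    """First loop: distill task-related cues into an image prompt, then caption."""
    feature_prompt = llm(
        "Condense the requirements, questions and cue words of the task below into a "
        "short prompt for an image captioning model.\n"
        f"Task: {task_text}"
    )
    # The initial description is produced by the captioner conditioned on this prompt.
    return captioner(image_path, feature_prompt)

def predict_with_llm(description: str, task_text: str,
                     icl_examples: List[str], llm: LLM) -> str:
    """Merge the description, task specifics and CVR-ICL demonstrations into a
    task-focused prompt and let the LLM predict."""
    demos = "\n\n".join(icl_examples)
    prompt = f"{demos}\n\nImage description: {description}\nTask: {task_text}\nAnswer:"
    return llm(prompt)
```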
These predictions are then treated as pseudo labels, asking LLMs to design further inquiries for deeper insights around them. In this way, we build up a feedback reflection between LLM prediction and context-aware caption, enhancing the richness and accuracy of the content produced. The textual feedback is then leveraged to refine the image prompts, providing deep insights that inform and guide the generation of nuanced image descriptions. The revised context-aware image descriptions can be described as follows:\nwhere is the revised generated context-aware image description. Q is the further query from LLM. is the prediction from LLM according to the generated task prompt. is the text feedback for updating image prompt.\n###figure_4###" + }, + { + "section_id": "3.2", + "parent_section_id": "3", + "section_name": "Complex Visual Reasoning ICL", + "text": "LLMs are renowned for their exceptional in-context learning capabilities, especially with task-specific examples. The optimal in-context exemplars enable LLMs to leverage their background knowledge for more precise outcomes. However, most of the research works Liu et al. (2021a ###reference_b26###); Sorensen et al. (2022 ###reference_b40###) have primarily focused on the text-centric domain, with few works Alayrac et al. (2022 ###reference_b2###); Zhao et al. (2023 ###reference_b50###) exploring multi-modal in-context learning for VL tasks. Our approach, unlike prior methods focused solely on text similarity in NLP, such as the NN-augmented in-context example selection (KATE), integrates multi-modal factors, thereby enriching the discipline with a fresh perspective. Furthermore, it is also different from MMICL Zhao et al. (2023 ###reference_b50###) in the multi-modal domain, which employs a vision prompt generator for image-to-visual embedding conversion and merges these with text embeddings as a union measurement factor.\nComplex visual reasoning tasks demand models capable of selecting in-context examples from a multi-modal domain, leveraging extensive background knowledge and information within it Zhao et al. (2023 ###reference_b50###). However, our CVR-LLM is grounded in LLMs, which are inherently text-based, leading to a gap between textual and multi-modal domains. Directly applying a text-based NN clustering method could result in the loss of important multi-modal information. On the other hand, using multi-modal information for retrieval might ignore essential context-aware information within our generated image descriptions. To address this, we propose the complex visual reasoning ICL, which aims to select in-context examples for LLMs by effectively integrating both text and multi-modal components. This dual analysis enables our LLM to more effectively select contextually relevant examples, ensuring a balanced integration of text and multi-modal insights for enhanced in-context learning. Figure 4 ###reference_### illustrates the framework of our CVR-ICL strategy. Specifically, given a task with an image , we initially convert the image into a description , which enables the task to be applicable not only in multi-modal domains but also in text-only scenarios. Then, we employ a multi-modal encoder and a text encoder to transform inputs from the multi-modal domain and the text domain into vector representations as follows:\nwhere is the vector representation in the multi-modal domain. 
is the vector representation in the text domain.\nUpon transforming each example into two distinct vector forms, we compute the cosine similarity score to identify and select the examples that are most relevant. Considering a target sample in test set and the th example in the training set, the similarity calculation process can be expressed as follows:\nwhere is the similarity score between the target sample and th example in dataset on the multi-modal domain, is the similarity score between the target sample and th example in dataset on the text domain. is the final similarity score. is the cosine similarity function. Finally, the top- cases with the highest are selected as the in-context examples, aimed at boosting the contextual understanding and prediction accuracy of the LLMs." + }, + { + "section_id": "4", + "parent_section_id": null, + "section_name": "Experiments", + "text": "" + }, + { + "section_id": "4.1", + "parent_section_id": "4", + "section_name": "Dataset and Metrics", + "text": "To evaluate the effectiveness of our proposed method, we conduct a comprehensive test in complex visual reasoning areas. Our evaluation included WinoGAViL (4373 samples), Winoground (400 samples), Whoops (500 samples), VCR (2653 out of over 26k samples, selecting a random 10), and NYCCC (528 samples), providing a broad assessment of our approach\u2019s capabilities.\nIn the terms of metrics, we adhered to the evaluation methods provided by these datasets, ensuring a fair assessment of our method\u2019s performance." + }, + { + "section_id": "4.2", + "parent_section_id": "4", + "section_name": "Implementation Details", + "text": "For the basic captioner in context-aware image description (Section 3.1 ###reference_###), we choose the BLIP2-flant5xxl Li et al. (2023 ###reference_b22###) as our baseline. For CVR-ICL phase (Section 3.2 ###reference_###), we employ BM25 Robertson et al. (1995 ###reference_b38###) and BLIP2 multi-embedding Li et al. (2023 ###reference_b22###) to encode text and multi-modal inputs, respectively. It is important to note that the ICL example results are derived from LLM inference without using actual annotations to prevent data leakage. For our LLMs, we choose three popular LLMs as inference models for generation tests including: Llama3-8B Meta (2024 ###reference_b31###) for CVR-LLMLlama3, GPT3.5 OpenAI (2023 ###reference_b35###) for CVR-LLMGPT3.5, and GPT4 Achiam et al. (2023 ###reference_b1###) for CVR-LLMGPT4. Performance comparisons are conducted directly on the test set without any fine-tuning, as WinoGAViL, Winoground, and NYCC datasets are exclusively for testing purposes." + }, + { + "section_id": "4.3", + "parent_section_id": "4", + "section_name": "Comparison to State-of-the-Arts", + "text": "In this section, we evaluate our proposed CVR-LLM against various models across a range of complex visual reasoning tasks, including WinoGAViL, Winoground, Whoops, VCR, and NYCCC. These models fall into two categories: VLMs Kim et al. (2021 ###reference_b19###); Radford et al. (2021 ###reference_b37###); Gan et al. (2020 ###reference_b14###); Li et al. (2023 ###reference_b22###) and MLLMs Liu et al. (2024 ###reference_b25###, 2023a ###reference_b24###); Zhu et al. (2023 ###reference_b55###); Chen et al. (2023 ###reference_b7###). 
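For reference, with the encoders fixed in Section 4.2 (BM25 for text, BLIP-2 multi-embeddings for the multi-modal side), the CVR-ICL selection of Section 3.2 reduces to the minimal sketch below. The encoder wrappers and the additive form of the combined score are illustrative assumptions; only the overall recipe (two cosine similarities, a balanced combination, top-k selection) follows the description above.

```python
import numpy as np
from typing import Callable, List, Sequence

Encoder = Callable[[object], np.ndarray]  # assumed wrapper: sample -> feature vector

def cosine(u: np.ndarray, v: np.ndarray) -> float:
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-8))

def select_icl_examples(target, pool: Sequence, k: int,
                        text_enc: Encoder, mm_enc: Encoder,
                        weight: float = 1.0) -> List[int]:
    """Rank training candidates by combining text-domain and multimodal-domain
    cosine similarity with the target sample, then return the top-k indices."""
    t_text, t_mm = text_enc(target), mm_enc(target)
    scores = []
    for cand in pool:
        s_text = cosine(t_text, text_enc(cand))
        s_mm = cosine(t_mm, mm_enc(cand))
        scores.append(s_mm + weight * s_text)  # one plausible balanced combination
    # Figure 8 suggests around four in-context cases works well in practice.
    return sorted(range(len(pool)), key=lambda i: scores[i], reverse=True)[:k]
```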
Notably, MLLMs like LLaVA and MiniGPT4 struggle with tasks involving multiple images, making their performance data unavailable for WinoGAViL and Winoground.\nTable 1 ###reference_### showcases our method\u2019s superiority across five tasks, eclipsing both VLMs and LMMs. For example, our CVR-LLMLlama3 significantly surpasses the SOTA model BLIP2 by achieving an accuracy (+17.1 improvement) in SWOW setting on the WinoGAViL benchmarks. Similarly, it outperforms the SOTA model MiniGPT4 with a accuracy (+13.8 improvement) on the GPT4 rate Bitton-Guetta et al. (2023 ###reference_b4###) for Whoops tasks, underscoring our framework\u2019s advanced performance. Additionally, our method performs well on three LLM-based categories, demonstrating robust generation abilities with consistent performance. This highlights the versatility and adaptability of our model, ensuring high-quality results across various complex visual reasoning tasks." + }, + { + "section_id": "4.4", + "parent_section_id": "4", + "section_name": "Ablation Studies", + "text": "In this section, we examine the individual contributions of the components within our framework CVR-LLMGPT4. As demonstrated in Table 2 ###reference_###, we present an ablation study that quantifies the performance impact of each module across various datasets. The experimental findings suggest that the CVR-ICL module significantly boosts the inference performance of LLMs compared to using context-aware image descriptions alone, with the exception of the NYCCC dataset (It may be due to NYCCC\u2019s focus on humor, where precise descriptions are more critical). This highlights the CVR-ICL module\u2019s effectiveness in enhancing LLM capabilities across various tasks. In addition, our comprehensive method, CVR-LLM, which integrates both context-aware descriptions and CVR-ICL, achieves a substantial enhancement in performance relative to the baseline." + }, + { + "section_id": "4.5", + "parent_section_id": "4", + "section_name": "Analysis", + "text": "In this section, we investigate CaID\u2019s impact at an abstract level and design a novel method to quantitatively demonstrate the semantic gap between context-aware image descriptions and general image captions (Note that the performance impact has been shown in Table 2 ###reference_###). Figure 5 ###reference_### provides two examples comparing context-aware image descriptions with general image captions and our goal is to determine whether context-aware descriptions offer more contextually relevant information to aid LLMs in decision-making. Unlike traditional sentence evaluations that rely on annotations to compute metrics like BLEU Papineni et al. (2002 ###reference_b36###) and CIDEr Vedantam et al. (2015 ###reference_b44###), we lack direct measures to assess the contextual relevance of sentences. To address this, we use GPT4 Achiam et al. (2023 ###reference_b1###) to evaluate the relative effectiveness between two kinds of expressions with the prompt: \u201cEvaluate the equivalence of the following two options for the task XXX. Option A: XXX; Option B: XXX. Please return True if Option B is better than Option A in answering questions; return False if the opposite is true; return Equal if they are the same for the question.\u201d. Additionally, inspired by the concept of chain-of-thought (CoT) Wei et al. (2022 ###reference_b45###), we propose a novel comparison chain-of-comparison (CoC), which implements a step-by-step analysis to evaluate the effectiveness. 
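For reference, the single-shot judgment quoted above can be scripted as follows; `ask_gpt4` is an assumed wrapper around the GPT4 API rather than a documented call, and only the prompt wording is taken from the text.

```python
from typing import Callable

AskLLM = Callable[[str], str]  # assumed GPT4 wrapper: prompt -> reply text

def judge_equivalence(task: str, option_a: str, option_b: str,
                      ask_gpt4: AskLLM) -> str:
    """Single-shot comparison of a general caption (A) and a context-aware
    description (B); returns 'True', 'False' or 'Equal'."""
    prompt = (
        f"Evaluate the equivalence of the following two options for the task {task}. "
        f"Option A: {option_a}; Option B: {option_b}. "
        "Please return True if Option B is better than Option A in answering questions; "
        "return False if the opposite is true; return Equal if they are the same for the question."
    )
    reply = ask_gpt4(prompt).strip().lower()
    # Simple normalisation of the free-form reply to the three expected labels.
    if "true" in reply and "false" not in reply:
        return "True"
    if "false" in reply:
        return "False"
    return "Equal"
```

CoC extends this single-shot verdict into the explicit multi-step protocol described next.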
This method involves a comprehensive four-step analysis protocol, depicted in Figure 6 ###reference_###. It follows a series of cognitive steps that our brains undertake to make sense of information, particularly when engaging with complex problems.\n###figure_5### ###figure_6### ###figure_7### Figure 7 ###reference_### shows the results of directly employing GPT4 to compare the effectiveness of general image captions with our image descriptions in the specific scenario of answering task-related questions. Furthermore, Table 3 ###reference_### presents the performance derived from utilizing GPT4 to conduct a detailed, step-by-step analytical assessment of effectiveness. These empirical results indicate that our approach yields image descriptions with enhanced contextual relevance, thereby significantly aiding LLMs in the decision-making process, particularly on the WinoGAViL and Whoops datasets.\nThe CVR-ICL is designed to optimize the selection of in-context exemplars within a multi-modal environment, thereby enhancing the reasoning abilities of LLMs. This innovative method is contrasted with three alternative configurations: Random In-Context Learning (RICL) Brown et al. (2020 ###reference_b5###), KATE Liu et al. (2021a ###reference_b26###), and Multi-modal Similar In-Context Learning (MMICL) Zhao et al. (2023 ###reference_b50###). To ensure a fair comparison, we utilized general image captions across all models to test performance for eliminating the effect of our context-aware image descriptions. As demonstrated in Table 4 ###reference_###, our CVR-ICL outperforms other ICL methods, demonstrating its adeptness at integrating and leveraging both textual and multi-modal domains to select the most contextually appropriate exemplars.\nFigure 8 ###reference_### illustrates the influence of varying case numbers in the CVR-ICL on the performance of our proposed CVR-LLM method. The experimental results suggest a trend where the model\u2019s performance initially improves with an increase in case numbers, exhibits fluctuations at higher numbers, and eventually declines as the case number becomes excessively large. This pattern suggests that the optimal selection for the number of cases is four.\n###figure_8### ###figure_9###" + }, + { + "section_id": "5", + "parent_section_id": null, + "section_name": "Qualitative Results", + "text": "To showcase the capabilities of our approach, we present qualitative results in Figure 9 ###reference_###. It illustrates how LLMs leverage contextual information to ask more relevant and insightful questions tailored the specific tasks. For instance, when provided with an image of the chess piece, the LLMs might ask \u201cWhat does the chess piece look like?\u201d. Subsequently, the captioner model generates contextually appropriate descriptions, such as \u201cA chess piece that looks like a unicorn.\u201d. This synergy enhances the LLM\u2019s decision-making process, making it more precise and context-aware. More detailed qualitative results with corresponding prompts and CVR-ICL examples are illustrated in Appendix A.1 ###reference_### and Appendix A.2 ###reference_###." + }, + { + "section_id": "6", + "parent_section_id": null, + "section_name": "Conclusion", + "text": "In this work, we propose CVR-LLM, an innovative approach for complex visual reasoning tasks. This method boosts LLMs\u2019 understanding of visual content for complex reasoning via context-aware image descriptions. 
We also develop a multi-modal in-context learning technique, enhancing LLMs\u2019 reasoning skills at both image and text levels. Experimental results show that CVR-LLM sets new benchmarks across multiple complex visual reasoning tasks. We also introduce a nuanced GPT4 based analysis technique Chain-of-Comparison to automatically break down and contrast among various aspects of generated results." + }, + { + "section_id": "7", + "parent_section_id": null, + "section_name": "Limitation", + "text": "Although our approach achieves SOTA performance across a wide range of complex visual reasoning benchmarks, it still has two notable limitations. First, compared to the MLLMs that can perform end-to-end inference directly, our approach operates as an LLM-agent-driven framework. This involves VLMs generating context-aware image descriptions, followed by the LLM performing inference with ICL to predict the answer. While this two-step process enhances contextual understanding and reasoning, it may significantly increase time consumption compared to direct end-to-end inference models. Second, despite its overall strong performance and generalization ability, our approach still lags behind GPT4V in some tasks. Figure 10 ###reference_### shows that our CVR-LLM can surpass GPT4V in SWOW setting in WinoGAViL dataset but fall short in others. Our future work will focus on refining the integration between VLMs and LLMs components and enhancing the model\u2019s efficiency and accuracy across a broader spectrum of complex visual reasoning challenges.\n###figure_10###" + } + ], + "appendix": [ + { + "section_id": "Appendix 1", + "parent_section_id": null, + "section_name": "Appendix A Appendix", + "text": "Section 5 ###reference_### only illustrates the simplified process of our Context-aware Image Description (CaID) generation. Here, we delve into more details about the generation process and the corresponding prompts. Figure 11 ###reference_### provides an example of the CaID generation process applied to the VCR Zellers et al. (2019 ###reference_b49###) task. In this example, the initial input consists of an image showing several individuals, with two of them (Person1 and Person4) holding guns. The associated question is: \u201cWhy do Person1 and Person4 have guns?\u201d with multiple-choice options such as \u201c1) They are soldiers. 2) Person1 and Person4 are robbing a hotel room. 3) They are cattle thieves. 4) They are about to shoot someone.\u201d.\nThe CaID process begins by generating a detailed description of the image. The captioner model produces an initial caption: \u201cAn image of a man in a suit with a gun and another in a suit with a gun.\u201d. This caption, while descriptive, lacks the context needed to answer the specific question posed. To address this, our system prompts the LLM with a scenario where it acts as a questioner for the image caption model. The LLM is instructed to generate a follow-up question to gather crucial information for answer prediction. The prompt guides the LLM to consider specific details such as the appearance and pose of the individuals. In this case, the LLM generates the question: \u201cWhat is the appearance of Person1 and Person4?\u201d. This question is designed to extract more contextually relevant details from the image captioner. The captioner then provides a refined description: \u201cPerson1 is wearing a suit with a gun and Person4 is wearing a suit with a gun.\u201d. 
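The walkthrough above corresponds to the feedback turn of CaID; a sketch of that turn is given below, continuing the assumed `LLM` and `Captioner` interfaces used in the earlier CaID sketch. The prompt wording is illustrative rather than our exact released prompt.

```python
from typing import Callable

LLM = Callable[[str], str]             # assumed: prompt -> response
Captioner = Callable[[str, str], str]  # assumed: (image_path, prompt) -> caption

def refine_description(image_path: str, task_text: str, pseudo_label: str,
                       current_description: str,
                       llm: LLM, captioner: Captioner) -> str:
    """Feedback turn: ask the LLM for a follow-up question about the tentative
    answer, answer it with the captioner, and fold the answer back in."""
    follow_up = llm(
        "You are a questioner for an image caption model. Given the task, the current "
        "description and a tentative answer, ask one follow-up question whose answer "
        "would help confirm or reject that answer.\n"
        f"Task: {task_text}\nDescription: {current_description}\n"
        f"Tentative answer: {pseudo_label}"
    )
    # e.g. "What is the appearance of Person1 and Person4?" in the VCR example above.
    visual_answer = captioner(image_path, follow_up)
    return f"{current_description} {visual_answer}"
```

In the example above, the returned string carries exactly this kind of appearance detail.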
This additional information helps to better understand the scene and narrows down the possible answers to the original question. This detailed process highlights how our system leverages both multi-modal and textual information to generate precise and contextually relevant descriptions, ultimately improving the performance on complex visual reasoning tasks.\n###figure_11### ###figure_12### Section 3.2 ###reference_### only illustrates the mechanism of our CVR-ICL. Here, we explain more details about its implementation. Figures 12 ###reference_### showcases one example of our CVR-ICL on the WinoGAViL Bitton et al. (2022 ###reference_b3###).\nTo accurately calculate similarity scores using the cosine similarity function, we utilize BM25 Robertson et al. (1995 ###reference_b38###) for text encoding and BLIP2 multi-embedding Li et al. (2023 ###reference_b22###) for multi-modal inputs. As illustrated in Figure 12 ###reference_###, the process begins with encoding both the test and training prompts through multi-modal and text-based encoders. For instance, a test case from WinoGAViL might contain the question \u201cSelect two pictures most related to clouds?\u201d along with images of a foggy river, a cloud of sand on a beach, and other related scenes. At the beginning, the multi-modal encoder processes these images as well as the question and generates multimodal-level embeddings. Simultaneously, we convert these images into context-aware image descriptions and translate the entire case into text form. The text-based encoder then generates corresponding text-level embeddings. Next, we calculate the individual cosine similarity scores in both the multi-modal and text domains. The final similarity score, which determines the most relevant cases, is calculated in a balanced manner as . These scores are then sorted, and the top- most similar cases are selected as in-context learning examples. This dual-encoding and similarity scoring approach ensures that we capture the nuanced relationships between multi-modal inputs and text, thereby enhancing the accuracy and relevance of our in-context learning framework.\nIn this section, we explore the impact of fine-tuning strategy on performance in complex visual reasoning tasks. Since some tasks in the complex visual reasoning field are initially designed in the supervised setting, we are curious whether our approach can also perform better with the help of real annotation. For the test-only datasets WinoGAViL and Winoground, we randomly divided them into splits of training, validation, and testing. Due to the small number of cases in these tasks, we abandoned training LLMs to avoid catastrophic forgetting. Instead, we chose to fine-tune the captioner using the real labels and incorporated these real annotations into our CVR-ICL examples. Results shown in Table 5 ###reference_### compare our CVR-LLM\u2019s performance in zero-shot and fine-tuned settings against SOTA performances, revealing that our method maintains SOTA performance in several areas.\n###figure_13### The Chain-of-Comparison (CoC) is designed to qualitatively analyze the semantic contribution of context-aware image descriptions against general image captions. It is inspired by the popular idea of Chain-of-Thought, which implements a step-by-step analysis to evaluate effectiveness. 
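A sketch of how such a prompt chain might be issued and parsed is given below; the four step names follow Figure 6, while the wording and the `ask_gpt4` wrapper are illustrative assumptions rather than our exact released prompt.

```python
from typing import Callable, Dict

AskLLM = Callable[[str], str]  # assumed GPT4 wrapper: prompt -> reply text

COC_STEPS = [
    "Initial Perception",
    "Recognizing Incongruity",
    "Contextual Analysis",
    "Linking to the Question",
]

def chain_of_comparison(task: str, option_a: str, option_b: str,
                        ask_gpt4: AskLLM) -> Dict[str, str]:
    """Compare a general caption (A) and a context-aware description (B) step by
    step, recording a per-step preference as reported in Table 3."""
    verdicts: Dict[str, str] = {}
    for step in COC_STEPS:
        prompt = (
            f"Task: {task}\n"
            f"Option A (general caption): {option_a}\n"
            f"Option B (context-aware description): {option_b}\n"
            f"Analysis step: {step}. For this step only, explain which option "
            "contributes more to answering the task, then end with exactly one of: A, B, Equal."
        )
        reply = ask_gpt4(prompt).strip()
        label = reply.split()[-1].strip(".") if reply.split() else "Equal"
        verdicts[step] = {"A": "Caption Better", "B": "Description Better"}.get(label, "Equal")
    return verdicts
```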
Figure 13 ###reference_### shows an example from the Whoops dataset, comparing the semantic gap between a general caption \u201cAn airplane prepares to take off\u201d (Option A) and our context-aware image description \u201cAn airplane is taking off from a highway in the middle of the desert.\u201d. (Option B).\nOur CoC prompt asks the LLM to analyze the semantic contribution through four steps: Initial Perception, Recognizing Incongruity, Contextual Analysis, and Linking to the Question. This process mimics the human brain\u2019s analytical process. We directly ask the LLM to compare the contributions of the two options and determine which is better.\nFor instance, in the Initial Perception step, the LLM identifies Option B as superior because it is highly unusual and immediately striking, as airplanes typically do not take off from highways, especially in desert environments. This scenario is much more unusual and striking compared to the routine scenario of Option A, which merely depicts an airplane preparing to take off at an airport. During the Contextual Analysis step, Option B is again favored. The LLM explains that contextually, the scenario raises questions about why an airplane is using a highway in a desert for takeoff, which is not standard practice and could imply unusual circumstances or emergencies. Option A, in contrast, has nothing contextually strange about an airplane preparing for takeoff in a typical airport setting. Finally, in the Linking to the Question step, the LLM determines that Option B provides a clearer connection to the concept of weirdness through its unconventional and striking situation. Option A does not inherently link to weirdness, as it describes a routine occurrence in aviation.\nThis example demonstrates how our CoC framework effectively breaks down and evaluates the semantic contributions of different types of image descriptions, highlighting the advantages of context-aware image descriptions in complex visual reasoning tasks.\nTable 1 ###reference_### presents the results of our CVR-LLM framework using Llama3, GPT-3.5, and GPT-4 base models. Additionally, we evaluated CVR-LLM on the Llama2-13B model Touvron et al. (2023b ###reference_b43###), which was also employed in LLaVA Wu et al. (2023 ###reference_b46###); Liu et al. (2024 ###reference_b25###), to ensure a fair comparison. Table 6 ###reference_### compares the performance of CVR-LLM (Llama2-based) and CVR-LLM (Llama3-based) against LLaVA versions 1.0 Liu et al. (2024 ###reference_b25###) and 1.5 Wu et al. (2023 ###reference_b46###) on complex reasoning tasks. The results demonstrate that while our CVR-LLM performs well on the Llama2 base model, it slightly underperform compared to Llama3.\nSection 3.2 ###reference_### explains that our in-context learning examples are selected based on a similarity score calculated as follows:\nIn this section, we discuss how the parameter influences the performance of In-Context Learning (ICL). Table 7 ###reference_### presents the results for various values of on the WinoGAViL dataset. The results indicate that leads to the best performance of our CVR-ICL strategy.\nIn the main paper, we compare our method with several popular end-to-end MLLMs, including LLaVA Wu et al. (2023 ###reference_b46###) and MiniGPT-4 Zhu et al. (2023 ###reference_b55###). Additionally, we evaluate our approach against VLM+LLM methods such as DDCoT Zheng et al. (2023 ###reference_b51###) and DIEM Jiang et al. (2024 ###reference_b17###). 
Table 8 ###reference_### presents the comparison results of our CVR-LLM framework versus these methods. While our approach is similar to DIEM in focusing on visual information from images, it demonstrates superior performance in complex visual reasoning tasks. Instead of decomposing the image and extracting information from individual components, we utilize an iterative refinement strategy, enabling the Large Language Model (LLM) to pose more precise questions and extract highly specific, valuable information from the image.\nOur CVR-LLM framework is designed for complex visual reasoning tasks, making it well-suited for multi-step reasoning datasets, such as ScienceQA Lu et al. (2022 ###reference_b29###) and M3CoT Chen et al. (2024 ###reference_b8###). In this section, we evaluate the performance of our CVR-LLM on the M3CoT dataset to determine its effectiveness. Table 9 ###reference_### presents a comparison between our CVR-LLM and other Tool-Usage methods. The results show that our approach performs well on questions related to general image content, particularly in areas like physical and social sciences. However, it faces challenges with images containing multiple elements, occasionally leading to hallucinations in detailed descriptions." + } + ], + "tables": { + "1": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
TypeModelWinoGAViLWinogroundWhoopsVCRNYCCC
5/610/12SWOWTextImageGroupGPT4 RateQ->AQA->RMatch acc.CrowdAcc
VLMViLT\u00a0Kim et\u00a0al. (2021)\n55.052.059.034.714.09.2-----
CLIP ViT-L/14\u00a0Radford et\u00a0al. (2021)\n47.015.066.0------56.655.8
UNITER\u00a0Chen et\u00a0al. (2020)\n---38.014.010.5-----
ViLLA\u00a0Gan et\u00a0al. (2020)\n---37.013.211.0---48.147.0
BLIP\u00a0Li et\u00a0al. (2022)\n54.645.066.546.527.724.222.029.227.558.758.1
BLIP2\u00a0Li et\u00a0al. (2023)\n49.338.871.644.026.023.531.024.525.658.356.7
MLLMLLaVA 1.0\u00a0Liu et\u00a0al. (2024)\n------32.028.340.055.853.1
LLaVA 1.5\u00a0Liu et\u00a0al. (2023a)\n------42.435.144.559.356.0
MiniGPT4 V1\u00a0Zhu et\u00a0al. (2023)\n------44.640.647.758.555.6
MiniGPT4 V2\u00a0Chen et\u00a0al. (2023)\n------48.248.849.760.459.2
VLM+LLMCVR-LLMLlama3\n72.370.488.745.029.524.560.450.552.459.857.7
CVR-LLMGPT3.5\n73.471.683.442.730.523.561.251.153.459.456.8
CVR-LLMGPT4\n74.773.286.543.535.026.562.052.954.360.657.4
\n
\n
Table 1: The comparison of our CVR-LLM with popular VLMs and MLLMs on five complex visual reasoning tasks. Notably, MLLMs like LLaVA and MiniGPT4 exhibit limitations in handling tasks involving multiple images or computing image-text similarity scores, resulting in their performance being unavailable for tasks like WinoGAViL and Winoground.
\n
", + "capture": "Table 1: The comparison of our CVR-LLM with popular VLMs and MM LLMs on five complex visual reasoning tasks. Notably, MLLMs like LLaVA and MiniGPT4 exhibit limitations in handling tasks involving multiple images or computing image-text similarity scores, resulting in their performance being unavailable for tasks like WinoGAViL and Winoground." + }, + "2": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModuleWinoGAViLWinogroundWhoopsVCRNYCCC
5/610/12SWOWTextImageGroupGPT4 RateQ->AQA->RQ->ARMatch acc.CrowdAccNYAcc
Base60.058.378.428.726.216.036.438.037.021.341.841.346.0
Base+CaID63.562.073.731.530.019.754.643.944.222.951.548.753.6
Base+CVR-ICL69.866.180.939.029.222.060.648.849.225.848.047.652.9
CVR-LLMGPT4\n73.473.286.543.535.026.562.054.352.930.460.657.463.1
\n
\n
Table 2: The ablation study of our CVR-LLM on five complex visual reasoning tasks. \"Base\" represents using the general image captions and GPT4 to complete these tasks. \"Base+CaID\" means using the context-aware image descriptions instead of the general image captions and GPT4 to test the performance. \"Base+CVR-ICL\" represents using general image captions and GPT4 with our designed CVR-ICL learning methods.
\n
", + "capture": "Table 2: The ablation study of our CVR-LLM on five complex visual reasoning tasks. \"Base\" represents using the general image captions and GPT4 to complete these tasks. \"Base+CaID\" means using the context-aware image descriptions instead of the general image captions and GPT4 to test the performance. \"Base+CVR-ICL\" represents using general image captions and GPT4 with our designed CVR-ICL learning methods." + }, + "3": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
DatasetOptionStep 1Step 2Step 3Step 4Average
WinoGAViLCaption Better6.04.38.35.05.9
Description Better75.376.071.376.774.8
Equal18.719.720.318.319.3
WinogroundCaption Better24.024.029.027.026
Description Better59.056.059.056.057.5
Equal17.020.012.017.016.5
WhoopsCaption Better27.013.014.013.016.7
Description Better71.080.076.075.075.5
Equal2.07.010.012.07.7
VCRCaption Better24.332.530.128.628.9
Description Better53.545.450.652.750.5
Equal22.222.119.318.720.6
NYCCCCaption Better18.615.817.419.117.7
Description Better58.562.360.461.060.5
Equal22.921.922.219.921.8
\n
\n
Table 3: The performance of using GPT4 to assess the effectiveness of two options (general image caption and our context-aware image description) based on CoC.
\n
", + "capture": "Table 3: The performance of using GPT4 to assess the effectiveness of two options (general image caption and our context-aware image description) based on CoC." + }, + "4": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
DatasetCategoryRICL\u00a0Brown et\u00a0al. (2020)\nKATE\u00a0Liu et\u00a0al. (2021a)\nMMICL\u00a0Zhao et\u00a0al. (2023)\nCVR-ICL
WinoGAViL5/664.168.666.369.8
10/1261.764.162.866.1
SWOW80.782.880.980.9
WinogroundText35.029.527.539.0
Image22.530.025.029.2
Group18.520.017.522.0
WhoopsGPT4 Rate60.462.060.862.0
VCRQ->A45.148.644.048.8
QA->R46.548.946.349.2
Q->AR22.524.823.625.8
NYCCCMatch acc.44.447.545.548.0
CrowdAcc46.646.443.747.6
NYAcc50.351.249.852.9
\n
\n
Table 4: The performance of using different ICL methods on different datasets.
\n
", + "capture": "Table 4: The performance of using different ICL methods on different datasets." + }, + "5": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
DatasetCategoryZero-shotFinetuned
SOTACVR-LLMSOTACVR-LLM
WinoGAViL5/655.074.754.682.8
10/1252.073.247.280.8
SWOW59.088.768.895.9
WinogroundText46.543.547.055.0
Image27.735.042.242.5
Group24.226.530.535.0
WhoopsGPT-4 Rate31.062.071.072.0
VCRQ->A48.852.987.485.3
QA->R49.754.389.687.5
Q->AR28.630.478.677.1
NYCCCMatch acc.60.460.684.580.9
CrowdAcc59.257.473.369.6
NYAcc66.563.168.265.4
\n
\n
Table 5: The comparison of our CVR-LLM against SOTA performance under two kinds of settings.
\n
", + "capture": "Table 5: The comparison of our CVR-LLM against SOTA performance under two kinds of settings." + }, + "6": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelWhoopsVCR (Q->A)NYCCC (Match)
LLaVA 1.032.028.355.8
LLaVA 1.542.435.159.3
CVR-LLMLlama2\n55.644.656.4
CVR-LLMLlama3\n60.450.559.8
\n
\n
Table 6: The comparison of our CVR-LLM with Llama2 and Llama3 base models against SOTA LLaVA models.
\n
", + "capture": "Table 6: The comparison of our CVR-LLM with Llama2 and Llama3 base against SOTA LLaVA models." + }, + "7": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
0.10.20.30.512
WinoGAViL (5/6)66.667.966.568.369.865.8
WinoGAViL (10/12)63.765.163.664.866.162.0
WinoGAViL (swow)76.377.075.878.180.972.7
\n
\n
Table 7: The performance of our CVR-LLM framework with varying values of the similarity-weighting parameter on the WinoGAViL dataset.
\n
", + "capture": "Table 7: The performance of our CVR-LLM framework with varying values on the WinoGAViL dataset." + }, + "8": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Models\n\n\nWinoGAViL\n\n(swow)\n\n\n\nWinoground\n\n(group)\n\n\n\nWhoops\n\n(GPT4 rate)\n\n\n\nVCR\n\n(Q->A)\n
DDCoT77.520.248.440.7
DIEM83.522.558.050.5
CVR-LLM86.526.562.054.3
\n
\n
Table 8: The comparison of our CVR-LLM against other VLM+LLM methods.
\n
", + "capture": "Table 8: The comparison of our CVR-LLM against other VLM+LLM methods." + }, + "9": { + "table_html": "
\n
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
ModelHuggingGPTIdealGPTChameleonCVR-LLM
Lang17.5731.7343.8734.10
Natural20.9331.6326.0533.20
Social10.3326.2325.4424.84
Physical8.756.5239.1371.11
Social14.7550.0037.3069.83
Temporal9.7626.8348.7830.89
Algebra11.3520.5717.7329.29
Geometry22.5030.0026.2522.50
Theory9.5238.1023.8128.57
\n
\n
Table 9: The comparison of our CVR-LLM against other Tool-Usage methods on the M3CoT dataset.
\n
", + "capture": "Table 9: The comparison of our CVR-LLM against other Tool-Usage methods on the M3CoT dataset." + } + }, + "image_paths": { + "1": { + "figure_path": "2409.13980v1_figure_1.png", + "caption": "Figure 1: Five distinct examples from diverse datasets in the complex visual reasoning field Bitton-Guetta et al. (2023) challenge AI models\u2019 ability of complex reasoning in different aspects such as general commonsense.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture1.png" + }, + "2": { + "figure_path": "2409.13980v1_figure_2.png", + "caption": "Figure 2: An example of our CVR-LLM works on the Winoground dataset. Our method transfers images into context-aware image descriptions through CaID and leverages the sophisticated reasoning and ICL abilities of LLMs with the CVR-ICL module, offering a more precise answer.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture2.png" + }, + "3": { + "figure_path": "2409.13980v1_figure_3.png", + "caption": "Figure 3: The framework overview of CaID. It is designed to transfer images into contextualized descriptions, bypassing the need for direct multi-modal fusion and leveraging LLMs\u2019 extensive knowledge for more accurate predictions.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture3.png" + }, + "4": { + "figure_path": "2409.13980v1_figure_4.png", + "caption": "Figure 4: The generic diagram of our proposed CVR-ICL approach. The dual analysis enables our approach to more effectively select contextually relevant examples from text and multi-modal domains.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture4.png" + }, + "5": { + "figure_path": "2409.13980v1_figure_5.png", + "caption": "Figure 5: Two examples from WinoGAViL compare context-aware image descriptions with general image captions. WinoGAViL is designed to ask the model to select the image that best matches the cue word.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture5.png" + }, + "6": { + "figure_path": "2409.13980v1_figure_6.png", + "caption": "Figure 6: The illustration of how to use GPT4 for step-by-step comparsion.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture6.png" + }, + "7": { + "figure_path": "2409.13980v1_figure_7.png", + "caption": "Figure 7: Hypothesis verification with GPT4, which demonstrates the effectiveness of our CaID against general image captions.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture7.png" + }, + "8": { + "figure_path": "2409.13980v1_figure_8.png", + "caption": "Figure 8: The different case numbers in CVR-ICL and corresponding performance.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture8.png" + }, + "9": { + "figure_path": "2409.13980v1_figure_9.png", + "caption": "Figure 9: Two qualitative results from Whoops illustrating the capabilities of our approach. Whoops is designed to ask the model to explain what makes images weird.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture9.png" + }, + "10": { + "figure_path": "2409.13980v1_figure_10.png", + "caption": "Figure 10: The comparison of our CVR-LLM against GPT-4V.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture10.png" + }, + "11": { + "figure_path": "2409.13980v1_figure_11.png", + "caption": "Figure 11: The detailed illustration of our CaID process on VCR. 
Best viewed by zooming in.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture11.png" + }, + "12": { + "figure_path": "2409.13980v1_figure_12.png", + "caption": "Figure 12: The detailed illustration of our CVR-ICL on WinoGAViL. Best viewed by zooming in.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture13.png" + }, + "13": { + "figure_path": "2409.13980v1_figure_13.png", + "caption": "Figure 13: The detailed illustration of our CoC on Whoops. Best viewed by zooming in.", + "url": "http://arxiv.org/html/2409.13980v1/extracted/5869515/images/picture15.png" + } + }, + "validation": true, + "references": [ + { + "1": { + "title": "Gpt-4 technical report.", + "author": "Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023.", + "venue": "arXiv preprint arXiv:2303.08774.", + "url": null + } + }, + { + "2": { + "title": "Flamingo: a visual language model for few-shot learning.", + "author": "Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. 2022.", + "venue": "Advances in Neural Information Processing Systems, 35:23716\u201323736.", + "url": null + } + }, + { + "3": { + "title": "Winogavil: Gamified association benchmark to challenge vision-and-language models.", + "author": "Yonatan Bitton, Nitzan Bitton Guetta, Ron Yosef, Yuval Elovici, Mohit Bansal, Gabriel Stanovsky, and Roy Schwartz. 2022.", + "venue": "Advances in Neural Information Processing Systems, 35:26549\u201326564.", + "url": null + } + }, + { + "4": { + "title": "Breaking common sense: Whoops! a vision-and-language benchmark of synthetic and compositional images.", + "author": "Nitzan Bitton-Guetta, Yonatan Bitton, Jack Hessel, Ludwig Schmidt, Yuval Elovici, Gabriel Stanovsky, and Roy Schwartz. 2023.", + "venue": "In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2616\u20132627.", + "url": null + } + }, + { + "5": { + "title": "Language models are few-shot learners.", + "author": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020.", + "venue": "Advances in neural information processing systems, 33:1877\u20131901.", + "url": null + } + }, + { + "6": { + "title": "A survey on evaluation of large language models.", + "author": "Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. 2023.", + "venue": "ACM Transactions on Intelligent Systems and Technology.", + "url": null + } + }, + { + "7": { + "title": "Minigpt-v2: large language model as a unified interface for vision-language multi-task learning.", + "author": "Jun Chen, Deyao Zhu, Xiaoqian Shen, Xiang Li, Zechun Liu, Pengchuan Zhang, Raghuraman Krishnamoorthi, Vikas Chandra, Yunyang Xiong, and Mohamed Elhoseiny. 2023.", + "venue": "arXiv preprint arXiv:2310.09478.", + "url": null + } + }, + { + "8": { + "title": "M3cot: A novel benchmark for multi-domain multi-step multi-modal chain-of-thought.", + "author": "Qiguang Chen, Libo Qin, Jin Zhang, Zhi Chen, Xiao Xu, and Wanxiang Che. 
2024.", + "venue": "arXiv preprint arXiv:2405.16473.", + "url": null + } + }, + { + "9": { + "title": "Microsoft coco captions: Data collection and evaluation server.", + "author": "Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2015.", + "venue": "In arXiv preprint arXiv:1504.00325.", + "url": null + } + }, + { + "10": { + "title": "Uniter: Universal image-text representation learning.", + "author": "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020.", + "venue": "In European conference on computer vision, pages 104\u2013120. Springer.", + "url": null + } + }, + { + "11": { + "title": "Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality.", + "author": "Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. 2023.", + "venue": "See https://vicuna. lmsys. org (accessed 14 April 2023), 2(3):6.", + "url": null + } + }, + { + "12": { + "title": "Training verifiers to solve math word problems.", + "author": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, et al. 2021.", + "venue": "arXiv preprint arXiv:2110.14168.", + "url": null + } + }, + { + "13": { + "title": "An image is worth 16x16 words: Transformers for image recognition at scale.", + "author": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020.", + "venue": "arXiv preprint arXiv:2010.11929.", + "url": null + } + }, + { + "14": { + "title": "Large-scale adversarial training for vision-and-language representation learning.", + "author": "Zhe Gan, Yen-Chun Chen, Linjie Li, Chen Zhu, Yu Cheng, and Jingjing Liu. 2020.", + "venue": "Advances in Neural Information Processing Systems, 33:6616\u20136628.", + "url": null + } + }, + { + "15": { + "title": "Vision-language pre-training: Basics, recent advances, and future trends.", + "author": "Zhe Gan, Linjie Li, Chunyuan Li, Lijuan Wang, Zicheng Liu, Jianfeng Gao, et al. 2022.", + "venue": "Foundations and Trends\u00ae in Computer Graphics and Vision, 14(3\u20134):163\u2013352.", + "url": null + } + }, + { + "16": { + "title": "Do androids laugh at electric sheep? humor\" understanding\" benchmarks from the new yorker caption contest.", + "author": "Jack Hessel, Ana Marasovi\u0107, Jena D Hwang, Lillian Lee, Jeff Da, Rowan Zellers, Robert Mankoff, and Yejin Choi. 2022.", + "venue": "arXiv preprint arXiv:2209.06293.", + "url": null + } + }, + { + "17": { + "title": "Diem: Decomposition-integration enhancing multimodal insights.", + "author": "Xinyi Jiang, Guoming Wang, Junhao Guo, Juncheng Li, Wenqiao Zhang, Rongxing Lu, and Siliang Tang. 2024.", + "venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 27304\u201327313.", + "url": null + } + }, + { + "18": { + "title": "Multi-spectral image stitching via spatial graph reasoning.", + "author": "Zhiying Jiang, Zengxi Zhang, Jinyuan Liu, Xin Fan, and Risheng Liu. 
2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 472\u2013480.", + "url": null + } + }, + { + "19": { + "title": "Vilt: Vision-and-language transformer without convolution or region supervision.", + "author": "Wonjae Kim, Bokyung Son, and Ildoo Kim. 2021.", + "venue": "In International Conference on Machine Learning, pages 5583\u20135594. PMLR.", + "url": null + } + }, + { + "20": { + "title": "Large language models are zero-shot reasoners.", + "author": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022.", + "venue": "Advances in neural information processing systems, 35:22199\u201322213.", + "url": null + } + }, + { + "21": { + "title": "Improving zero-shot visual question answering via large language models with reasoning question prompts.", + "author": "Yunshi Lan, Xiang Li, Xin Liu, Yang Li, Wei Qin, and Weining Qian. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 4389\u20134400.", + "url": null + } + }, + { + "22": { + "title": "Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models.", + "author": "Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. 2023.", + "venue": "arXiv preprint arXiv:2301.12597.", + "url": null + } + }, + { + "23": { + "title": "Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation.", + "author": "Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022.", + "venue": "In International Conference on Machine Learning, pages 12888\u201312900. PMLR.", + "url": null + } + }, + { + "24": { + "title": "Improved baselines with visual instruction tuning.", + "author": "Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. 2023a.", + "venue": "arXiv preprint arXiv:2310.03744.", + "url": null + } + }, + { + "25": { + "title": "Visual instruction tuning.", + "author": "Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2024.", + "venue": "Advances in neural information processing systems, 36.", + "url": null + } + }, + { + "26": { + "title": "What makes good in-context examples for gpt-?", + "author": "Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2021a.", + "venue": "arXiv preprint arXiv:2101.06804.", + "url": null + } + }, + { + "27": { + "title": "Matcr: Modality-aligned thought chain reasoning for multimodal task-oriented dialogue generation.", + "author": "Yiting Liu, Liang Li, Beichen Zhang, Shan Huang, Zheng-Jun Zha, and Qingming Huang. 2023b.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 5776\u20135785.", + "url": null + } + }, + { + "28": { + "title": "Swin transformer: Hierarchical vision transformer using shifted windows.", + "author": "Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. 2021b.", + "venue": "In Proceedings of the IEEE/CVF international conference on computer vision, pages 10012\u201310022.", + "url": null + } + }, + { + "29": { + "title": "Learn to explain: Multimodal reasoning via thought chains for science question answering.", + "author": "Pan Lu, Swaroop Mishra, Tony Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. 
2022.", + "venue": "In The 36th Conference on Neural Information Processing Systems (NeurIPS).", + "url": null + } + }, + { + "30": { + "title": "Counterfactual cross-modality reasoning for weakly supervised video moment localization.", + "author": "Zezhong Lv, Bing Su, and Ji-Rong Wen. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 6539\u20136547.", + "url": null + } + }, + { + "31": { + "title": "Llama 3 model card.", + "author": "Meta. 2024.", + "venue": null, + "url": "https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md" + } + }, + { + "32": { + "title": "Cross-lingual transfer of large language model by visually-derived supervision toward low-resource languages.", + "author": "Masayasu Muraoka, Bishwaranjan Bhattacharjee, Michele Merler, Graeme Blackwood, Yulong Li, and Yang Zhao. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 3637\u20133646.", + "url": null + } + }, + { + "33": { + "title": "A comprehensive overview of large language models.", + "author": "Humza Naveed, Asad Ullah Khan, Shi Qiu, Muhammad Saqib, Saeed Anwar, Muhammad Usman, Nick Barnes, and Ajmal Mian. 2023.", + "venue": "arXiv preprint arXiv:2307.06435.", + "url": null + } + }, + { + "34": { + "title": "Introducing chatgpt.", + "author": "OpenAI. 2022.", + "venue": null, + "url": "https://openai.com/blog/chatgpt" + } + }, + { + "35": { + "title": "Gpt-3.5: Generative pre-trained transformer 3.5.", + "author": "OpenAI. 2023.", + "venue": "https://www.openai.com/research/gpt-3-5.", + "url": null + } + }, + { + "36": { + "title": "Bleu: a method for automatic evaluation of machine translation.", + "author": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.", + "venue": "In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311\u2013318. Association for Computational Linguistics.", + "url": null + } + }, + { + "37": { + "title": "Learning transferable visual models from natural language supervision.", + "author": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021.", + "venue": "arXiv preprint arXiv:2103.00020.", + "url": null + } + }, + { + "38": { + "title": "Okapi at trec-3.", + "author": "Stephen E Robertson, Steve Walker, Susan Jones, Micheline M Hancock-Beaulieu, and Mike Gatford. 1995.", + "venue": "NIST SPECIAL PUBLICATION SP, pages 109\u2013109.", + "url": null + } + }, + { + "39": { + "title": "On second thought, let\u2019s not think step by step! bias and toxicity in zero-shot reasoning.", + "author": "Omar Shaikh, Hongxin Zhang, William Held, Michael Bernstein, and Diyi Yang. 2022.", + "venue": "arXiv preprint arXiv:2212.08061.", + "url": null + } + }, + { + "40": { + "title": "An information-theoretic approach to prompt engineering without ground truth labels.", + "author": "Taylor Sorensen, Joshua Robinson, Christopher Michael Rytting, Alexander Glenn Shaw, Kyle Jeffrey Rogers, Alexia Pauline Delorey, Mahmoud Khalil, Nancy Fulda, and David Wingate. 2022.", + "venue": "arXiv preprint arXiv:2203.11364.", + "url": null + } + }, + { + "41": { + "title": "Winoground: Probing vision and language models for visio-linguistic compositionality.", + "author": "Tristan Thrush, Ryan Jiang, Max Bartolo, Amanpreet Singh, Adina Williams, Douwe Kiela, and Candace Ross. 
2022.", + "venue": "In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5238\u20135248.", + "url": null + } + }, + { + "42": { + "title": "Llama: Open and efficient foundation language models.", + "author": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023a.", + "venue": "arXiv preprint arXiv:2302.13971.", + "url": null + } + }, + { + "43": { + "title": "Llama 2: Open foundation and fine-tuned chat models.", + "author": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023b.", + "venue": "arXiv preprint arXiv:2307.09288.", + "url": null + } + }, + { + "44": { + "title": "Cider: Consensus-based image description evaluation.", + "author": "Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015.", + "venue": "In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566\u20134575. IEEE.", + "url": null + } + }, + { + "45": { + "title": "Chain-of-thought prompting elicits reasoning in large language models.", + "author": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022.", + "venue": "Advances in Neural Information Processing Systems, 35:24824\u201324837.", + "url": null + } + }, + { + "46": { + "title": "Visual chatgpt: Talking, drawing and editing with visual foundation models.", + "author": "Chenfei Wu, Shengming Yin, Weizhen Qi, Xiaodong Wang, Zecheng Tang, and Nan Duan. 2023.", + "venue": "arXiv preprint arXiv:2303.04671.", + "url": null + } + }, + { + "47": { + "title": "Against opacity: Explainable ai and large language models for effective digital advertising.", + "author": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 9299\u20139305.", + "url": null + } + }, + { + "48": { + "title": "Hierarchical reasoning network with contrastive learning for few-shot human-object interaction recognition.", + "author": "Jiale Yu, Baopeng Zhang, Qirui Li, Haoyang Chen, and Zhu Teng. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 4260\u20134268.", + "url": null + } + }, + { + "49": { + "title": "From recognition to cognition: Visual commonsense reasoning.", + "author": "Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. 2019.", + "venue": "In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6720\u20136731.", + "url": null + } + }, + { + "50": { + "title": "Mmicl: Empowering vision-language model with multi-modal in-context learning.", + "author": "Haozhe Zhao, Zefan Cai, Shuzheng Si, Xiaojian Ma, Kaikai An, Liang Chen, Zixuan Liu, Sheng Wang, Wenjuan Han, and Baobao Chang. 2023.", + "venue": "arXiv preprint arXiv:2309.07915.", + "url": null + } + }, + { + "51": { + "title": "Ddcot: Duty-distinct chain-of-thought prompting for multimodal reasoning in language models.", + "author": "Ge Zheng, Bin Yang, Jiajin Tang, Hong-Yu Zhou, and Sibei Yang. 
2023.", + "venue": "Advances in Neural Information Processing Systems, 36:5168\u20135191.", + "url": null + } + }, + { + "52": { + "title": "Sur-adapter: Enhancing text-to-image pre-trained diffusion models with large language models.", + "author": "Shanshan Zhong, Zhongzhan Huang, Weushao Wen, Jinghui Qin, and Liang Lin. 2023.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 567\u2013578.", + "url": null + } + }, + { + "53": { + "title": "Learning from easy to hard pairs: Multi-step reasoning network for human-object interaction detection.", + "author": "Yuchen Zhou, Guang Tan, Mengtang Li, and Chao Gou. 2023a.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 4368\u20134377.", + "url": null + } + }, + { + "54": { + "title": "Uncovering the unseen: Discover hidden intentions by micro-behavior graph reasoning.", + "author": "Zhuo Zhou, Wenxuan Liu, Danni Xu, Zheng Wang, and Jian Zhao. 2023b.", + "venue": "In Proceedings of the 31st ACM International Conference on Multimedia, pages 6623\u20136633.", + "url": null + } + }, + { + "55": { + "title": "Minigpt-4: Enhancing vision-language understanding with advanced large language models.", + "author": "Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. 2023.", + "venue": "arXiv preprint arXiv:2304.10592.", + "url": null + } + } + ], + "url": "http://arxiv.org/html/2409.13980v1" +} \ No newline at end of file