coyotte508 HF staff committed on
Commit a675894
1 Parent(s): 905f8ef

Create 2306.05425.atom

Files changed (1)
  1. arxiv/2306.05425.atom +71 -0
arxiv/2306.05425.atom ADDED
@@ -0,0 +1,71 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <feed xmlns="http://www.w3.org/2005/Atom">
+ <link href="http://arxiv.org/api/query?search_query%3D%26id_list%3D2306.05425%26start%3D0%26max_results%3D1" rel="self" type="application/atom+xml"/>
+ <title type="html">ArXiv Query: search_query=&amp;id_list=2306.05425&amp;start=0&amp;max_results=1</title>
+ <id>http://arxiv.org/api/UKr4sG8yTUNI8GqLUahlRDrZ5vk</id>
+ <updated>2023-06-09T00:00:00-04:00</updated>
+ <opensearch:totalResults xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:totalResults>
+ <opensearch:startIndex xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">0</opensearch:startIndex>
+ <opensearch:itemsPerPage xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:itemsPerPage>
+ <entry>
+ <id>http://arxiv.org/abs/2306.05425v1</id>
+ <updated>2023-06-08T17:59:56Z</updated>
+ <published>2023-06-08T17:59:56Z</published>
+ <title>MIMIC-IT: Multi-Modal In-Context Instruction Tuning</title>
+ <summary> High-quality instructions and responses are essential for the zero-shot
+ performance of large language models on interactive natural language tasks. For
+ interactive vision-language tasks involving intricate visual scenes, a large
+ quantity of diverse and creative instruction-response pairs should be
+ imperative to tune vision-language models (VLMs). Nevertheless, the current
+ availability of vision-language instruction-response pairs in terms of
+ quantity, diversity, and creativity remains limited, posing challenges to the
+ generalization of interactive VLMs. Here we present MultI-Modal In-Context
+ Instruction Tuning (MIMIC-IT), a dataset comprising 2.8 million multimodal
+ instruction-response pairs, with 2.2 million unique instructions derived from
+ images and videos. Each pair is accompanied by multi-modal in-context
+ information, forming conversational contexts aimed at empowering VLMs in
+ perception, reasoning, and planning. The instruction-response collection
+ process, dubbed as Syphus, is scaled using an automatic annotation pipeline
+ that combines human expertise with GPT's capabilities. Using the MIMIC-IT
+ dataset, we train a large VLM named Otter. Based on extensive evaluations
+ conducted on vision-language benchmarks, it has been observed that Otter
+ demonstrates remarkable proficiency in multi-modal perception, reasoning, and
+ in-context learning. Human evaluation reveals it effectively aligns with the
+ user's intentions. We release the MIMIC-IT dataset, instruction-response
+ collection pipeline, benchmarks, and the Otter model.
+ </summary>
+ <author>
+ <name>Bo Li</name>
+ </author>
+ <author>
+ <name>Yuanhan Zhang</name>
+ </author>
+ <author>
+ <name>Liangyu Chen</name>
+ </author>
+ <author>
+ <name>Jinghao Wang</name>
+ </author>
+ <author>
+ <name>Fanyi Pu</name>
+ </author>
+ <author>
+ <name>Jingkang Yang</name>
+ </author>
+ <author>
+ <name>Chunyuan Li</name>
+ </author>
+ <author>
+ <name>Ziwei Liu</name>
+ </author>
+ <arxiv:comment xmlns:arxiv="http://arxiv.org/schemas/atom">Project page: https://otter-ntu.github.io/ Dataset &amp; code:
+ https://github.com/Luodian/otter Initial release, work in progress</arxiv:comment>
+ <link href="http://arxiv.org/abs/2306.05425v1" rel="alternate" type="text/html"/>
+ <link title="pdf" href="http://arxiv.org/pdf/2306.05425v1" rel="related" type="application/pdf"/>
+ <arxiv:primary_category xmlns:arxiv="http://arxiv.org/schemas/atom" term="cs.CV" scheme="http://arxiv.org/schemas/atom"/>
+ <category term="cs.CV" scheme="http://arxiv.org/schemas/atom"/>
+ <category term="cs.AI" scheme="http://arxiv.org/schemas/atom"/>
+ <category term="cs.CL" scheme="http://arxiv.org/schemas/atom"/>
+ </entry>
+ </feed>
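
For context: the file added above is a static snapshot of an arXiv API Atom response for id_list=2306.05425. Below is a minimal Python sketch (standard library only) of how such a feed can be fetched and parsed; it is an illustration written for this note, not part of the commit. The endpoint URL and element lookups mirror the query and feed structure shown above, and export.arxiv.org is arXiv's public API host.

    import urllib.request
    import xml.etree.ElementTree as ET

    # Atom namespace used by every element in the feed above.
    ATOM = "{http://www.w3.org/2005/Atom}"

    # Reconstructs the cached query: one result for id_list=2306.05425.
    url = "http://export.arxiv.org/api/query?id_list=2306.05425&start=0&max_results=1"
    with urllib.request.urlopen(url) as resp:
        feed = ET.fromstring(resp.read())

    # Pull the same fields visible in the snapshot: title, authors, PDF link.
    entry = feed.find(ATOM + "entry")
    title = entry.findtext(ATOM + "title")
    authors = [a.findtext(ATOM + "name") for a in entry.findall(ATOM + "author")]
    pdf = next(l.get("href") for l in entry.findall(ATOM + "link")
               if l.get("title") == "pdf")

    print(title)       # MIMIC-IT: Multi-Modal In-Context Instruction Tuning
    print(authors[0])  # Bo Li
    print(pdf)         # http://arxiv.org/pdf/2306.05425v1

Caching the response as a file, as this commit does, avoids re-querying the API on every page load while keeping the raw Atom XML available for re-parsing.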