hysts (HF staff) committed on
Commit d098b8e
1 Parent(s): 5a30814
Files changed (1)
  1. data.json +9 -5
data.json CHANGED
@@ -3296,7 +3296,7 @@
   },
   {
     "id": 19618,
-    "title": "Planting a SEED of Vision in Large Language Model",
+    "title": "Making LLaMA SEE and Draw with SEED Tokenizer",
     "authors": [
       "Yuying Ge",
       "Sijie Zhao",
@@ -3309,10 +3309,14 @@
     "abstract": "The great success of Large Language Models (LLMs) has expanded the potential of multimodality, contributing to the gradual evolution of General Artificial Intelligence (AGI). A true AGI agent should not only possess the capability to perform predefined multi-tasks but also exhibit emergent abilities in an open-world context. However, despite the considerable advancements made by recent multimodal LLMs, they still fall short in effectively unifying comprehension and generation tasks, let alone open-world emergent abilities. We contend that the key to overcoming the present impasse lies in enabling text and images to be represented and processed interchangeably within a unified autoregressive Transformer. To this end, we introduce $\\textbf{SEED}$, an elaborate image tokenizer that empowers LLMs with the ability to $\\textbf{SEE}$ and $\\textbf{D}$raw at the same time. We identify two crucial design principles: (1) Image tokens should be independent of 2D physical patch positions and instead be produced with a $\\textit{1D causal dependency}$, exhibiting intrinsic interdependence that aligns with the left-to-right autoregressive prediction mechanism in LLMs. (2) Image tokens should capture $\\textit{high-level semantics}$ consistent with the degree of semantic abstraction in words, and be optimized for both discriminativeness and reconstruction during the tokenizer training phase. With SEED tokens, LLM is able to perform scalable multimodal autoregression under its original training recipe, i.e., next-word prediction. SEED-LLaMA is therefore produced by large-scale pretraining and instruction tuning on the interleaved textual and visual data, demonstrating impressive performance on a broad range of multimodal comprehension and generation tasks. More importantly, SEED-LLaMA has exhibited compositional emergent abilities such as multi-turn in-context multimodal generation, acting like your AI assistant. The code and models will be publicly released.",
     "type": "Poster",
     "OpenReview": "https://openreview.net/forum?id=0Nui91LBQS",
-    "arxiv_id": "2307.08041",
-    "GitHub": [],
+    "arxiv_id": "2310.01218",
+    "GitHub": [
+      "https://github.com/AILab-CVC/SEED"
+    ],
     "Space": [],
-    "Model": [],
+    "Model": [
+      "https://huggingface.co/AILab-CVC/SEED"
+    ],
     "Dataset": []
   },
   {
@@ -44385,4 +44389,4 @@
     "Model": [],
     "Dataset": []
   }
-]
+]
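For reference, a minimal sketch of how the updated record could be read back after this commit. It assumes data.json is a top-level JSON array of paper entries with the fields visible in the hunks above (id, title, arxiv_id, GitHub, Space, Model, Dataset); nothing outside this diff is guaranteed.

# Minimal sketch (not part of this commit): look up the entry touched by this diff.
# Assumes data.json is a top-level JSON array of paper records as suggested by the
# surrounding context lines; any other fields or structure are not guaranteed.
import json

with open("data.json", encoding="utf-8") as f:
    papers = json.load(f)

seed = next(p for p in papers if p["id"] == 19618)
print(seed["title"])     # "Making LLaMA SEE and Draw with SEED Tokenizer"
print(seed["arxiv_id"])  # "2310.01218"
print(seed["GitHub"])    # ["https://github.com/AILab-CVC/SEED"]
print(seed["Model"])     # ["https://huggingface.co/AILab-CVC/SEED"]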