Inoichan committed on
Commit 0f8098c
1 Parent(s): 75d77d8

Update usage

Files changed (1)
README.md +24 -14
README.md CHANGED
@@ -52,7 +52,7 @@ from mantis.models.mllava import chat_mllava, LlavaForConditionalGeneration, MLl
 from mantis.models.mllava.utils import conv_templates
 from transformers import AutoTokenizer

-# Set the system prompt
+# 1. Set the system prompt
 conv_llama_3_elyza = Conversation(
     system="<|start_header_id|>system<|end_header_id|>\n\nあなたは誠実で優秀な日本人のアシスタントです。特に指示が無い場合は、常に日本語で回答してください。",
     roles=("user", "assistant"),
@@ -63,7 +63,7 @@ conv_llama_3_elyza = Conversation(
 )
 conv_templates["llama_3"] = conv_llama_3_elyza

-# 1. load model
+# 2. Load model
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_id = "SakanaAI/Llama-3-EvoVLM-JP-v2"

@@ -72,17 +72,7 @@ processor.tokenizer.pad_token = processor.tokenizer.eos_token

 model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, device_map=device).eval()

-# 2. prepare inputs
-text = "<image> <image>それぞれ簡単に説明してください。"
-url_list = [
-    "https://images.unsplash.com/photo-1703023190744-a28366dfcebe?q=80&w=2574&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D",
-    "https://images.unsplash.com/photo-1541364983171-a8ba01e95cfc?q=80&w=2574&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
-]
-images = [
-    Image.open(requests.get(url, stream=True).raw).convert("RGB") for url in url_list
-]
-
-# 3. generate
+# 3. Prepare a generate config
 generation_kwargs = {
     "max_new_tokens": 128,
     "num_beams": 1,
@@ -90,10 +80,30 @@ generation_kwargs = {
     "no_repeat_ngram_size": 3,
 }

+# 4. Generate
+text = "<image>の信号は何色ですか?"
+url_list = [
+    "https://images.unsplash.com/photo-1694831404826-3400c48c188d?q=80&w=2070&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D",
+    "https://images.unsplash.com/photo-1693240876439-473af88b4ed7?q=80&w=1974&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
+]
+images = [
+    Image.open(requests.get(url_list[0], stream=True).raw).convert("RGB")
+]
+
 response, history = chat_mllava(text, images, model, processor, **generation_kwargs)

 print(response)
-# 1は、クリスマス帽子を被ったフランダースブルドッグの写真です。2は、眼鏡を掛けたフランケンブルドックの写真で、どちらも可愛くて面白いです。
+# 信号の色は、青色です。
+
+# 5. Multi-turn conversation
+text = "では、<image>の信号は?"
+images += [
+    Image.open(requests.get(url_list[1], stream=True).raw).convert("RGB")
+]
+response, history = chat_mllava(text, images, model, processor, history=history, **generation_kwargs)
+
+print(response)
+# 赤色
 ```

 </details>
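For reference, the hunks above begin at README line 52, so the imports the updated snippet relies on are not part of this commit. Below is a minimal sketch of the preamble the example appears to assume: `torch`, `requests`, and `PIL` are inferred from how they are used in the hunks; the third name in the first mantis import is truncated to `MLl` in the hunk header, and the import path for `Conversation` is not visible in the diff, so both are assumptions.

```python
# Minimal preamble assumed by the updated usage snippet (not shown in this
# commit, which only changes README lines 52 and onward).
import torch      # torch.cuda.is_available(), torch.float16
import requests   # fetches the example images
from PIL import Image  # Image.open(...).convert("RGB")

# Imports visible in the diff context (the third name is truncated to "MLl"
# in the hunk header; MLlavaProcessor is an assumption).
from mantis.models.mllava import chat_mllava, LlavaForConditionalGeneration, MLlavaProcessor
from mantis.models.mllava.utils import conv_templates
from transformers import AutoTokenizer

# Assumption: Conversation comes from mantis; its exact import path does not
# appear in the hunks above.
from mantis.models.conversation import Conversation
```

In the updated example, the first call passes one `<image>` placeholder with one downloaded image, and the follow-up call appends the second image to `images` and passes `history=history` back to `chat_mllava`, so the second question is answered in the context of the first turn.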