limhyeonseok committed
Commit fdc0062 · verified · 1 Parent(s): 55e747c

Update README.md

Files changed (1)
  1. README.md +16 -4
README.md CHANGED
@@ -81,7 +81,11 @@ pip install torch transformers==4.44.0
 from transformers import LlavaNextForConditionalGeneration,LlavaNextProcessor
 import torch
 
-model = LlavaNextForConditionalGeneration.from_pretrained('Bllossom/llama-3.1-Korean-Bllossom-Vision-8B', torch_dtype=torch.bfloat16)
+model = LlavaNextForConditionalGeneration.from_pretrained(
+    'Bllossom/llama-3.1-Korean-Bllossom-Vision-8B',
+    torch_dtype=torch.bfloat16,
+    device_map='auto'
+)
 processor = LlavaNextProcessor.from_pretrained('Bllossom/llama-3.1-Korean-Bllossom-Vision-8B')
 
 with torch.no_grad():
@@ -106,7 +110,7 @@ with torch.no_grad():
 )
 
 bos_token = processor.tokenizer.bos_token_id
-chat_messages = torch.cat([torch.tensor([[bos_token]]),chat_messages],dim=-1)
+chat_messages = torch.cat([torch.tensor([[bos_token]]),chat_messages],dim=-1).to(model.device)
 
 
 output = model.generate(
@@ -127,7 +131,11 @@ from PIL import Image
 from transformers import LlavaNextForConditionalGeneration,LlavaNextProcessor
 import torch
 
-model = LlavaNextForConditionalGeneration.from_pretrained('Bllossom/llama-3.1-Korean-Bllossom-Vision-8B', torch_dtype=torch.bfloat16)
+model = LlavaNextForConditionalGeneration.from_pretrained(
+    'Bllossom/llama-3.1-Korean-Bllossom-Vision-8B',
+    torch_dtype=torch.bfloat16,
+    device_map='auto'
+)
 processor = LlavaNextProcessor.from_pretrained('Bllossom/llama-3.1-Korean-Bllossom-Vision-8B')
 
 image = Image.open('[IMAGE_PATH]').convert('RGB')
@@ -144,7 +152,11 @@ messages = [
 {'role': 'user', 'content': f"<image>\n{instruction}"}
 ]
 
-chat_messages = processor.tokenizer.apply_chat_template(messages,tokenize=False,add_generation_prompt=True)
+chat_messages = processor.tokenizer.apply_chat_template(
+    messages,
+    tokenize=False,
+    add_generation_prompt=True
+).to(model.device)
 
 inputs = processor(
 chat_messages,
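
The common thread in both hunks is loading the model with `device_map='auto'` and moving the prompt tensors onto `model.device`. A minimal sketch of what the new loading step implies, assuming the `accelerate` package is installed (it is required for `device_map='auto'`) and a CUDA GPU is visible:

```python
# Minimal sketch (not from the README): device_map='auto' dispatches the
# bfloat16 weights onto the available GPU(s) via accelerate, so tensors built
# on the CPU must be moved to model.device before calling generate().
import torch
from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor

model = LlavaNextForConditionalGeneration.from_pretrained(
    'Bllossom/llama-3.1-Korean-Bllossom-Vision-8B',
    torch_dtype=torch.bfloat16,
    device_map='auto',   # requires: pip install accelerate
)
processor = LlavaNextProcessor.from_pretrained('Bllossom/llama-3.1-Korean-Bllossom-Vision-8B')

print(model.device)  # e.g. cuda:0 when a GPU is available
```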
 
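One detail to keep in mind when reading the second snippet: with `tokenize=False`, `apply_chat_template` returns a plain string, and a string has no `.to()` method; the device move is normally applied to the tensor batch returned by the processor instead. A hedged sketch of that pattern (the `messages`, `image`, and `max_new_tokens` values here are illustrative, not from the commit):

```python
# Sketch of the usual device-placement pattern for the image example:
# keep the chat template as a string, then move the processor's output
# (a BatchFeature, which does implement .to()) onto model.device.
chat_messages = processor.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

inputs = processor(
    chat_messages,
    image,
    return_tensors='pt',
).to(model.device)

output = model.generate(**inputs, max_new_tokens=1024)
```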