Update README.md (#3)
Browse files — Update README.md (commit ae0f58f719f3b4e4c46f1f3188f2246bb73070fe)
README.md
CHANGED
@@ -109,7 +109,10 @@ print(tokenizer.decode(outputs[0]))
 109   from transformers import AutoTokenizer, AutoModelForCausalLM
 110
 111   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 112 - model = AutoModelForCausalLM.from_pretrained(
 113
 114   input_text = "Write me a poem about Machine Learning."
 115   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -148,7 +151,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 148   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 149
 150   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 151 - model = AutoModelForCausalLM.from_pretrained(
 152
 153   input_text = "Write me a poem about Machine Learning."
 154   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
@@ -166,7 +171,9 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 166   quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 167
 168   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 169 - model = AutoModelForCausalLM.from_pretrained(
 170
 171   input_text = "Write me a poem about Machine Learning."
 172   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 109   from transformers import AutoTokenizer, AutoModelForCausalLM
 110
 111   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 112 + model = AutoModelForCausalLM.from_pretrained(
 113 +     "google/gemma-2-27b-it",
 114 +     device_map="auto",
 115 +     torch_dtype=torch.bfloat16)
 116
 117   input_text = "Write me a poem about Machine Learning."
 118   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 151   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 152
 153   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 154 + model = AutoModelForCausalLM.from_pretrained(
 155 +     "google/gemma-2-27b-it",
 156 +     quantization_config=quantization_config)
 157
 158   input_text = "Write me a poem about Machine Learning."
 159   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 171   quantization_config = BitsAndBytesConfig(load_in_4bit=True)
 172
 173   tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-27b-it")
 174 + model = AutoModelForCausalLM.from_pretrained(
 175 +     "google/gemma-2-27b-it",
 176 +     quantization_config=quantization_config)
 177
 178   input_text = "Write me a poem about Machine Learning."
 179   input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")