Corrected the Version Number
#4
by
SalmanFaroz
- opened
README.md
CHANGED
@@ -347,8 +347,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
347 |
|
348 |
device = "cuda" # the device to load the model onto
|
349 |
|
350 |
-
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
|
351 |
-
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
|
352 |
|
353 |
messages = [
|
354 |
{"role": "user", "content": "What is your favourite condiment?"},
|
|
|
347 |
|
348 |
device = "cuda" # the device to load the model onto
|
349 |
|
350 |
+
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
|
351 |
+
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
|
352 |
|
353 |
messages = [
|
354 |
{"role": "user", "content": "What is your favourite condiment?"},
|