ecastera commited on
Commit
f9c55d3
1 Parent(s): 9808aad

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -32,7 +32,7 @@ I strongly advise to run inference in INT8 or INT4 mode, with the help of Bitsan
32
  import torch
33
  from transformers import AutoTokenizer, pipeline, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig
34
 
35
- MODEL = "ecastera/eva-mistral-dolphin-7b-spanish"
36
 
37
  quantization_config = BitsAndBytesConfig(
38
  load_in_4bit=True,
 
32
  import torch
33
  from transformers import AutoTokenizer, pipeline, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig
34
 
35
+ MODEL = "ecastera/ecastera-eva-westlake-7b-spanish"
36
 
37
  quantization_config = BitsAndBytesConfig(
38
  load_in_4bit=True,