AdrienB134 committed
Commit 03672a7
1 Parent(s): b94d860

Update README.md

Files changed (1):
  1. README.md +1 -1
README.md CHANGED
@@ -36,7 +36,7 @@ load_in_4bit = False # Use 4bit quantization to reduce memory usage. Can be True
 
 
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "unsloth/mistral-7b-v0.3", # Choose ANY! eg teknium/OpenHermes-2.5-Mistral-7B
+    model_name = "AdrienB134/French-Alpaca-Mistral-7B-v0.3",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
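
For context, the hunk edits the quick-start snippet in the model card so it loads the fine-tuned checkpoint instead of the base `unsloth/mistral-7b-v0.3` model. A minimal sketch of how that snippet would read after this commit is shown below; the `max_seq_length` and `dtype` values are assumptions (the diff only shows `load_in_4bit = False` in the hunk context).

```python
from unsloth import FastLanguageModel

max_seq_length = 2048   # assumed value; not visible in this diff hunk
dtype = None            # assumed; None lets Unsloth pick float16/bfloat16 automatically
load_in_4bit = False    # from the hunk context: 4-bit quantization disabled

# After the commit, the README points at the fine-tuned checkpoint.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "AdrienB134/French-Alpaca-Mistral-7B-v0.3",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
```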