suyash2739 committed
Commit: 010dfa3
1 Parent(s): 70a82b4
Update README.md
README.md CHANGED
@@ -41,7 +41,7 @@ dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name = "suyash2739/
+    model_name = "suyash2739/English_to_Hinglish_cmu_hinglish_dog",
     max_seq_length = max_seq_length,
     dtype = dtype,
     load_in_4bit = load_in_4bit,
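
For context, the changed hunk belongs to the README's Unsloth loading example. A minimal runnable sketch of that snippet is shown below; the value of max_seq_length is an assumption for illustration (the README defines it above the shown hunk), while the model name and the other arguments come directly from the diff.

```python
# Sketch of the README snippet this commit corrects: loading the fine-tuned
# model with Unsloth. max_seq_length here is an assumed illustrative value.
from unsloth import FastLanguageModel

max_seq_length = 2048   # assumed; defined earlier in the README, outside this hunk
dtype = None            # None for auto detection. Float16 for Tesla T4/V100, Bfloat16 for newer GPUs
load_in_4bit = True     # Use 4-bit quantization to reduce memory usage. Can be False.

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "suyash2739/English_to_Hinglish_cmu_hinglish_dog",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)

# Switch Unsloth into inference mode before generating.
FastLanguageModel.for_inference(model)
```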