Add new config options to Readme
Browse files
README.md
CHANGED
@@ -125,6 +125,8 @@ datasets:
|
|
125 |
# axolotl attempts to save the dataset as an arrow after packing the data together so
|
126 |
# subsequent training attempts load faster, relative path
|
127 |
dataset_prepared_path: data/last_run_prepared
|
|
|
|
|
128 |
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc
|
129 |
val_set_size: 0.04
|
130 |
|
@@ -206,12 +208,14 @@ auto_resume_from_checkpoints: false
|
|
206 |
|
207 |
# don't mess with this, it's here for accelerate and torchrun
|
208 |
local_rank:
|
209 |
-# add or change special tokens
|
210 |
|
|
|
211 |
special_tokens:
|
212 |
# bos_token: "<s>"
|
213 |
# eos_token: "</s>"
|
214 |
# unk_token: "<unk>"
|
|
|
|
|
215 |
|
216 |
# FSDP
|
217 |
fsdp:
|
|
|
125 |
# axolotl attempts to save the dataset as an arrow after packing the data together so
|
126 |
# subsequent training attempts load faster, relative path
|
127 |
dataset_prepared_path: data/last_run_prepared
|
128 |
+# push prepared dataset to hub
|
129 |
+push_dataset_to_hub: # repo path
|
130 |
# How much of the dataset to set aside as evaluation. 1 = 100%, 0.50 = 50%, etc
|
131 |
val_set_size: 0.04
|
132 |
|
|
|
208 |
|
209 |
# don't mess with this, it's here for accelerate and torchrun
|
210 |
local_rank:
|
|
|
211 |
|
212 |
+# add or change special tokens
|
213 |
special_tokens:
|
214 |
# bos_token: "<s>"
|
215 |
# eos_token: "</s>"
|
216 |
# unk_token: "<unk>"
|
217 |
+# add extra tokens
|
218 |
+tokens:
|
219 |
|
220 |
# FSDP
|
221 |
fsdp:
|