winglian committed
Commit 24566eb (parent: b84055a)

Upload folder using huggingface_hub

Files changed (1): configs/tiny-mistral.yml (+70, -0)
configs/tiny-mistral.yml ADDED
@@ -0,0 +1,70 @@
+base_model: openaccess-ai-collective/tiny-mistral
+base_model_config: openaccess-ai-collective/tiny-mistral
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: true
+model_config:
+  dropout_p: 0.1
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: emrgnt-cmplxty/sciphi-textbooks-are-all-you-need
+    type: completion
+    field: completion
+
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.001
+output_dir: ./mistral-textbooks
+hub_model_id: winglian/tiny-mistral-textbooks
+
+sequence_len: 4096
+sample_packing: true
+pad_to_sequence_len: true
+
+wandb_project: tiny-mistral-textbooks
+wandb_entity:
+wandb_watch:
+wandb_run_id:
+wandb_log_model:
+
+gradient_accumulation_steps: 8
+micro_batch_size: 4
+num_epochs: 1
+optimizer: adamw_torch
+lr_scheduler: cosine
+adam_beta2: 0.95
+adam_epsilon: 0.00001
+max_grad_norm: 1.0
+learning_rate: 0.000025
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_steps: 10
+eval_steps: 0.1
+eval_table_size:
+eval_table_max_new_tokens:
+save_steps: 0.1
+debug:
+deepspeed:
+weight_decay: 0.1
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
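The keys above match the axolotl trainer's config schema (base_model, datasets, sample_packing, flash_attention, and so on). As a minimal usage sketch, assuming axolotl and accelerate are installed and this file is saved at configs/tiny-mistral.yml, a training run would typically be launched with:

    accelerate launch -m axolotl.cli.train configs/tiny-mistral.yml

Since hub_model_id is set, checkpoints are pushed to winglian/tiny-mistral-textbooks on the Hub, and wandb_project routes training metrics to the tiny-mistral-textbooks project.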