---
library_name: transformers
tags:
- axolotl
- generated_from_trainer
model-index:
- name: magnum-v4-22b-r4
  results: []
---
### exl2 quant (measurement.json in main branch)
---
### check revisions for quants
---
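The exl2 quantizations live in separate revisions of the repo rather than in `main`. A minimal sketch for pulling one with `huggingface_hub`; the repo id is taken from the config's `hub_model_id` below, and the `8.0bpw` branch name is only a placeholder, so check the repo's revision list for the branches that actually exist:

```python
from huggingface_hub import snapshot_download

# Fetch a single quant revision into a local folder.
# "8.0bpw" is a hypothetical branch name; browse the repo's
# revisions on the Hub to see which quants were uploaded.
snapshot_download(
    repo_id="anthracite-core/magnum-v4-22b-r4",
    revision="8.0bpw",
    local_dir="magnum-v4-22b-exl2",
)
```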
+
19
+ [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
20
+ <details><summary>See axolotl config</summary>
21
+
22
+ axolotl version: `0.4.1`
23
+ ```yaml
24
+ base_model: /workspace/models/Mistral-Small-Instruct-2409
25
+ model_type: AutoModelForCausalLM
26
+ tokenizer_type: AutoTokenizer
27
+
28
+ hub_model_id: anthracite-core/magnum-v4-22b-r4
29
+ hub_strategy: "all_checkpoints"
30
+ push_dataset_to_hub:
31
+ hf_use_auth_token: true
32
+
33
+ plugins:
34
+ - axolotl.integrations.liger.LigerPlugin
35
+ liger_rope: true
36
+ liger_rms_norm: true
37
+ liger_swiglu: true
38
+ #liger_cross_entropy: true
39
+ liger_fused_linear_cross_entropy: true
40
+
41
+ load_in_8bit: false
42
+ load_in_4bit: false
43
+ strict: false
44
+
45
+ datasets:
46
+ - path: anthracite-core/c2_logs_32k_mistral-v3_v1.2_no_system
47
+ type: custommistralv2v3
48
+ - path: anthracite-core/kalo-opus-instruct-22k-no-refusal-no-system
49
+ type: custommistralv2v3
50
+ - path: anthracite-core/kalo-opus-instruct-3k-filtered-no-system
51
+ type: custommistralv2v3
52
+ - path: anthracite-org/nopm_claude_writing_fixed
53
+ type: custommistralv2v3
54
+ - path: anthracite-core/kalo_opus_misc_240827_no_system
55
+ type: custommistralv2v3
56
+ - path: anthracite-core/kalo_misc_part2_no_system
57
+ type: custommistralv2v3
58
+ #chat_template: mistral_v2v3
59
+ shuffle_merged_datasets: true
60
+ #default_system_message: "You are an assistant that responds to the user."
61
+ dataset_prepared_path: /workspace/data/magnum-22b-data
62
+ val_set_size: 0.0
63
+ output_dir: /workspace/data/22b-r4-fft-out
64
+
65
+ sequence_len: 32768
66
+ sample_packing: true
67
+ pad_to_sequence_len: true
68
+
69
+ adapter:
70
+ lora_model_dir:
71
+ lora_r:
72
+ lora_alpha:
73
+ lora_dropout:
74
+ lora_target_linear:
75
+ lora_fan_in_fan_out:
76
+
77
+ wandb_project: 22b-magnum-fft
78
+ wandb_entity:
79
+ wandb_watch:
80
+ wandb_name: v4-r4-attempt-01
81
+ wandb_log_model:
82
+
83
+ gradient_accumulation_steps: 2
84
+ micro_batch_size: 1
85
+ num_epochs: 2
86
+ optimizer: adamw_bnb_8bit
87
+ lr_scheduler: cosine
88
+ learning_rate: 0.000004
89
+
90
+ train_on_inputs: false
91
+ group_by_length: false
92
+ bf16: auto
93
+ fp16:
94
+ tf32: false
95
+
96
+ gradient_checkpointing: true
97
+ early_stopping_patience:
98
+ resume_from_checkpoint:
99
+ local_rank:
100
+ logging_steps: 1
101
+ xformers_attention:
102
+ flash_attention: true
103
+
104
+ warmup_steps: 40
105
+ evals_per_epoch:
106
+ eval_table_size:
107
+ eval_max_new_tokens:
108
+ saves_per_epoch: 2
109
+ debug:
110
+ deepspeed: deepspeed_configs/zero3_bf16.json
111
+ weight_decay: 0.1
112
+ fsdp:
113
+ fsdp_config:
114
+ special_tokens:
115
+ ```
116
+
117
+ </details><br>
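Once downloaded, the model loads through the standard `transformers` API. A minimal inference sketch, assuming the full-precision weights under the config's `hub_model_id` (the prompt is illustrative; the tokenizer carries the Mistral instruct chat template):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Repo id taken from the config's hub_model_id; quantized uploads
# may live in a different repo or revision.
model_id = "anthracite-core/magnum-v4-22b-r4"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # training ran in bf16
    device_map="auto",
)

# The training data carried no system prompts, so a bare user turn
# matches the format the model saw.
messages = [{"role": "user", "content": "Write a short scene set on a night train."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))
```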
# magnum-v4-22b-r4

This model is a full-parameter fine-tune of Mistral-Small-Instruct-2409 on the datasets listed in the axolotl config above.

## Model description

magnum-v4-22b-r4 is a 22B full fine-tune of Mistral-Small-Instruct-2409, trained at a 32,768-token sequence length with sample packing, Liger kernels, and DeepSpeed ZeRO-3 (see the config above).

## Intended uses & limitations

More information needed

## Training and evaluation data

Training used the six datasets listed in the axolotl config: the c2 logs set plus several kalo-opus/kalo-misc instruct sets and `anthracite-org/nopm_claude_writing_fixed`, shuffled after merging. No evaluation split was held out (`val_set_size: 0.0`).
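For inspection, the individual sets can be pulled with the `datasets` library. A small sketch; the `train` split name is an assumption, and some of these repos may be gated or private:

```python
from datasets import load_dataset

# One of the training sets named in the config. The "train" split
# is assumed; gated or private repos additionally need a HF token.
ds = load_dataset("anthracite-org/nopm_claude_writing_fixed", split="train")
print(ds)
```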
## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (the effective batch sizes are derived below):
- learning_rate: 4e-06
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- total_eval_batch_size: 8
- optimizer: 8-bit AdamW (`adamw_bnb_8bit`) with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 40
- num_epochs: 2
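The total batch sizes are derived rather than set directly; a quick check of the arithmetic, using only values from the list above:

```python
# Effective batch sizes implied by the per-device settings above.
micro_batch_size = 1              # train_batch_size per device
gradient_accumulation_steps = 2
num_devices = 8

total_train_batch_size = micro_batch_size * gradient_accumulation_steps * num_devices
assert total_train_batch_size == 16

# Evaluation accumulates no gradients, so only the devices multiply in.
total_eval_batch_size = micro_batch_size * num_devices
assert total_eval_batch_size == 8
```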
### Training results

No held-out evaluation split was configured (`val_set_size: 0.0`), so no evaluation metrics were recorded.

### Framework versions

- Transformers 4.45.0.dev0
- PyTorch 2.3.1+cu121
- Datasets 2.21.0
- Tokenizers 0.19.1