dimasik87 committed (verified)
Commit 303918e · Parent: 9a03748

End of training

Files changed (2):
  1. README.md +31 -27
  2. adapter_model.bin +2 -2
README.md CHANGED
@@ -41,12 +41,12 @@ early_stopping_patience: null
  eval_max_new_tokens: 128
  eval_table_size: null
  evals_per_epoch: 4
- flash_attention: true
- fp16: true
+ flash_attention: false
+ fp16: null
  fsdp: null
  fsdp_config: null
  gradient_accumulation_steps: 4
- gradient_checkpointing: true
+ gradient_checkpointing: false
  group_by_length: false
  hub_model_id: dimasik87/8102bae9-c442-4a29-848f-946f8771f1f9
  hub_repo: null
@@ -57,28 +57,28 @@ load_in_4bit: false
  load_in_8bit: false
  local_rank: null
  logging_steps: 1
- lora_alpha: 16
- lora_dropout: 0.1
+ lora_alpha: 32
+ lora_dropout: 0.05
  lora_fan_in_fan_out: null
  lora_model_dir: null
- lora_r: 8
+ lora_r: 16
  lora_target_linear: true
  lr_scheduler: cosine
  max_memory:
    0: 70GiB
- max_steps: 25
- micro_batch_size: 1
+ max_steps: 50
+ micro_batch_size: 2
  mlflow_experiment_name: /tmp/5daf839d73ce7025_train_data.json
  model_type: AutoModelForCausalLM
- num_epochs: 3
- optimizer: adamw_torch
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
  output_dir: miner_id_24
  pad_to_sequence_len: true
  resume_from_checkpoint: null
  s2_attention: null
  sample_packing: false
- saves_per_epoch: 3
- sequence_len: 2028
+ saves_per_epoch: 4
+ sequence_len: 1024
  strict: false
  tf32: false
  tokenizer_type: AutoTokenizer
@@ -103,7 +103,7 @@ xformers_attention: null
 
  This model is a fine-tuned version of [unsloth/Hermes-3-Llama-3.1-8B](https://huggingface.co/unsloth/Hermes-3-Llama-3.1-8B) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6604
+ - Loss: nan
 
  ## Model description
 
@@ -123,29 +123,33 @@ More information needed
 
  The following hyperparameters were used during training:
  - learning_rate: 0.0002
- - train_batch_size: 1
- - eval_batch_size: 1
+ - train_batch_size: 2
+ - eval_batch_size: 2
  - seed: 42
  - gradient_accumulation_steps: 4
- - total_train_batch_size: 4
- - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
+ - total_train_batch_size: 8
+ - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_steps: 10
- - training_steps: 25
+ - training_steps: 50
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | 6.2351 | 0.0004 | 1 | 7.1638 |
- | 7.0317 | 0.0013 | 3 | 7.1373 |
- | 5.6858 | 0.0025 | 6 | 6.6630 |
- | 5.0704 | 0.0038 | 9 | 4.9700 |
- | 3.2353 | 0.0051 | 12 | 2.9853 |
- | 1.8729 | 0.0063 | 15 | 1.5725 |
- | 0.8863 | 0.0076 | 18 | 0.9877 |
- | 1.4191 | 0.0089 | 21 | 0.7656 |
- | 0.397 | 0.0101 | 24 | 0.6604 |
+ | 0.0 | 0.0008 | 1 | nan |
+ | 0.0 | 0.0034 | 4 | nan |
+ | 0.0 | 0.0068 | 8 | nan |
+ | 0.0 | 0.0101 | 12 | nan |
+ | 0.0 | 0.0135 | 16 | nan |
+ | 0.0 | 0.0169 | 20 | nan |
+ | 0.0 | 0.0203 | 24 | nan |
+ | 0.0 | 0.0236 | 28 | nan |
+ | 0.0 | 0.0270 | 32 | nan |
+ | 0.0 | 0.0304 | 36 | nan |
+ | 0.0 | 0.0338 | 40 | nan |
+ | 0.0 | 0.0371 | 44 | nan |
+ | 0.0 | 0.0405 | 48 | nan |
 
 
  ### Framework versions
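The updated run doubles the LoRA capacity (lora_r 8 → 16, lora_alpha 16 → 32), lowers the dropout to 0.05, halves the sequence length to 1024, and switches to the 8-bit adamw_bnb_8bit optimizer; the effective batch size is micro_batch_size × gradient_accumulation_steps = 2 × 4 = 8, matching the total_train_batch_size reported above. As a rough illustration only (the run itself is driven by the YAML config embedded in this README, not by this snippet), the new adapter settings map approximately onto a PEFT LoraConfig as sketched below; the target_modules list is an assumption, since lora_target_linear: true resolves the actual linear layers at runtime.

```python
# Approximate PEFT equivalent of the updated LoRA settings in this commit.
# The module names below are a typical guess for a Llama-family model, not
# values taken from this config (lora_target_linear: true picks them at runtime).
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,               # lora_r
    lora_alpha=32,      # lora_alpha
    lora_dropout=0.05,  # lora_dropout
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```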
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ec2f9c10a826b35d8c76e3d52568ff4b5c53dbea3b5c23956e2103885b037f5c
- size 84047370
+ oid sha256:45fe9746a7db9cd15165b811a5331f0dc4b1d8ec660979d5129957450fdbfea6
+ size 167934026
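The adapter checkpoint roughly doubles in size (84 MB → 168 MB), consistent with lora_r going from 8 to 16. A minimal loading sketch follows, assuming the hub_model_id from the config above is the published adapter repository; note that the evaluation loss reported for this run is nan, so the weights may not be usable as trained.

```python
# Minimal sketch: apply the LoRA adapter from this repo to the base model.
# Assumes the adapter was pushed to the hub_model_id listed in the config.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("unsloth/Hermes-3-Llama-3.1-8B")
tokenizer = AutoTokenizer.from_pretrained("unsloth/Hermes-3-Llama-3.1-8B")
model = PeftModel.from_pretrained(base, "dimasik87/8102bae9-c442-4a29-848f-946f8771f1f9")
```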