ncbateman committed
Commit 7e8775f
1 Parent(s): 5f08cd7

End of training

Files changed (2)
  1. README.md +17 -9
  2. adapter_model.bin +1 -1
README.md CHANGED
@@ -24,8 +24,16 @@ bf16: auto
 chat_template: llama3
 dataset_prepared_path: null
 datasets:
-- path: mhenrichsen/alpaca_2k_test
-  type: alpaca
+- data_files:
+  - MATH-Hard_train_data.json
+  ds_type: json
+  path: /workspace/input_data/MATH-Hard_train_data.json
+  type:
+    field_input: problem
+    field_instruction: type
+    field_output: solution
+    system_format: '{system}'
+    system_prompt: ''
 debug: null
 deepspeed: null
 early_stopping_patience: null
@@ -56,7 +64,7 @@ lora_target_linear: true
 lr_scheduler: cosine
 max_steps: 10
 micro_batch_size: 2
-mlflow_experiment_name: mhenrichsen/alpaca_2k_test
+mlflow_experiment_name: /tmp/MATH-Hard_train_data.json
 model_type: LlamaForCausalLM
 num_epochs: 1
 optimizer: adamw_bnb_8bit
@@ -77,7 +85,7 @@ wandb_entity: breakfasthut
 wandb_mode: online
 wandb_project: tuning-miner
 wandb_run: miner
-wandb_runid: 383a850e-bb15-45a2-8f4b-fc96eb001a74
+wandb_runid: ab3318ee-d929-45d5-97e1-ccfee77df372
 warmup_steps: 10
 weight_decay: 0.0
 xformers_attention: null
@@ -90,7 +98,7 @@ xformers_attention: null
 
 This model is a fine-tuned version of [unsloth/Llama-3.2-1B-Instruct](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.2168
+- Loss: 0.9018
 
 ## Model description
 
@@ -124,10 +132,10 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 1.3218 | 0.0042 | 1 | 1.2625 |
-| 1.3028 | 0.0126 | 3 | 1.2629 |
-| 1.4831 | 0.0253 | 6 | 1.2133 |
-| 1.2899 | 0.0379 | 9 | 1.2168 |
+| 1.0005 | 0.0026 | 1 | 1.0009 |
+| 0.9902 | 0.0077 | 3 | 0.9955 |
+| 0.8842 | 0.0155 | 6 | 0.9534 |
+| 0.9599 | 0.0232 | 9 | 0.9018 |
 
 
 ### Framework versions
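
For reference, a minimal sketch of the field mapping the new `datasets` stanza declares (`field_instruction: type`, `field_input: problem`, `field_output: solution`). The local file name, record layout, and example values below are assumptions; the actual prompt template is whatever axolotl builds from this config.

```python
# Illustrative sketch only, not the training code from this commit.
# It shows how the new config's field mapping would read one record
# from the JSON data file (schema assumed to match the MATH dataset).
import json

with open("MATH-Hard_train_data.json") as f:  # assumed local copy of the data file
    records = json.load(f)

example = records[0]
instruction = example["type"]      # field_instruction -> e.g. a category such as "Algebra"
input_text = example["problem"]    # field_input -> the problem statement
output_text = example["solution"]  # field_output -> the target the model is trained to emit

print(f"Instruction: {instruction}\nInput: {input_text}\nOutput: {output_text}")
```
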
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e11e87d9f4c05d9e49cabe8c727e90dd550085dc314ff9ba6e0633297e846e2e
+oid sha256:0f03b57cee6d30f47901b118b5bd6c333f6b6baaef97d63290ab632a02e841a7
 size 45169354