codecodebear committed on
Commit b368f56 · verified · 1 Parent(s): b0afa2a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +10 -0
  2. csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors +3 -0
  3. csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors +3 -0
  4. csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt +3 -0
  5. csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors +3 -0
  6. csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md +202 -0
  7. csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors +3 -0
  8. csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt +3 -0
  9. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_config.json +39 -0
  10. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors +3 -0
  11. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/all_results.json +8 -0
  12. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/chat_template.jinja +89 -0
  13. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md +202 -0
  14. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_config.json +39 -0
  15. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors +3 -0
  16. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/added_tokens.json +28 -0
  17. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/chat_template.jinja +89 -0
  18. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/merges.txt +0 -0
  19. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt +3 -0
  20. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/special_tokens_map.json +31 -0
  21. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/trainer_state.json +685 -0
  22. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/merges.txt +0 -0
  23. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/special_tokens_map.json +31 -0
  24. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json +3 -0
  25. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer_config.json +240 -0
  26. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/trainer_log.jsonl +94 -0
  27. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/trainer_state.json +694 -0
  28. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/training_loss.png +0 -0
  29. csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/vocab.json +0 -0
  30. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/README.md +61 -0
  31. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_config.json +39 -0
  32. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors +3 -0
  33. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/added_tokens.json +28 -0
  34. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/all_results.json +8 -0
  35. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/chat_template.jinja +89 -0
  36. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md +202 -0
  37. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors +3 -0
  38. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/chat_template.jinja +89 -0
  39. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt +3 -0
  40. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_0.pth +3 -0
  41. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_1.pth +3 -0
  42. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_2.pth +3 -0
  43. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_3.pth +3 -0
  44. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_4.pth +3 -0
  45. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_5.pth +3 -0
  46. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_6.pth +3 -0
  47. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_7.pth +3 -0
  48. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/scheduler.pt +3 -0
  49. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/training_args.bin +3 -0
  50. csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/special_tokens_map.json +31 -0
.gitattributes CHANGED
@@ -93,3 +93,13 @@ gsm8k__llama3_8b_instruct__n5000_t1t2_s0_bare_100_r8_bs4_lr1e5_e3_f/lora/sft/tok
  csqa__llama3_8b_instruct__n5000_t1t2_s0_bare_100_r8_bs4_lr1e5_e3_f/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  gsm8k__qwen3_8b__n5000_t1t2_s0_bare_100_r8_bs4_lr1e5_e3_f/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
  gsm8k__qwen3_8b__n5000_t1t2_s0_bare_100_r8_bs4_lr1e5_e3_f/lora/sft/checkpoint-939/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ gsm8k__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json filter=lfs diff=lfs merge=lfs -text
csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e59ae81e95c7ec1ff9f54ed6a4927ae87c231519bdc0c05297a5a5de3b71efb5
+ size 83945296
csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e59ae81e95c7ec1ff9f54ed6a4927ae87c231519bdc0c05297a5a5de3b71efb5
+ size 83945296
csqa__llama3_8b_instruct__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:885fe2c2d78351315673955bcfeb9a1e7cbc0e52f3b907967ed5d34fe17a658d
+ size 168149539
csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c34190b141e145a0da05c3cc5e2ae305fe747004b04f8ad847787f46a9e03f3
+ size 83945296
csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: meta-llama/Meta-Llama-3-8B-Instruct
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c34190b141e145a0da05c3cc5e2ae305fe747004b04f8ad847787f46a9e03f3
+ size 83945296
csqa__llama3_8b_instruct__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9905f822a9cb3f7e06363edd46cd439038efe7fd176f8a2a0338409dd45d178f
+ size 168149539
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen3-8B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "gate_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "down_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
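
The adapter_config.json above describes a rank-8 LoRA (alpha 16, no dropout) over the q/k/v/o attention projections and the gate/up/down MLP projections of Qwen/Qwen3-8B. A minimal sketch of loading one of these adapters with PEFT; the local path is illustrative and not part of this commit:

```python
# Illustrative sketch, not part of this commit: load the base model and apply the LoRA adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen3-8B"  # matches base_model_name_or_path in adapter_config.json
adapter_dir = "csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft"  # local copy of this folder

tokenizer = AutoTokenizer.from_pretrained(adapter_dir)  # tokenizer files ship alongside the adapter
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_dir)     # applies the r=8, alpha=16 LoRA weights
model.eval()
```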
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7f888fbf570c57885834fd84a2e2c7f931f2c515a8e977b9c09d0bf96018b6a
+ size 87360584
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 3.0,
+ "total_flos": 1.8033509624132403e+17,
+ "train_loss": 0.5275962893256999,
+ "train_runtime": 463.0949,
+ "train_samples_per_second": 64.782,
+ "train_steps_per_second": 2.028
+ }
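
A rough sanity check of the summary above (a sketch, not part of the commit): the throughput figures imply about 30k examples processed over the 3 epochs and an effective batch of roughly 32, consistent with per-device batch 4 on the 8 processes suggested by the rng_state_0–7 files listed elsewhere in this commit.

```python
# Back-of-envelope arithmetic from all_results.json (values copied from the file above).
train_runtime = 463.0949            # seconds
samples_per_second = 64.782
global_steps = 939                   # see checkpoint-939 / trainer_state.json
epochs = 3.0

total_samples = samples_per_second * train_runtime   # ~30,000 examples processed in total
effective_batch = total_samples / global_steps       # ~32 (assumption: 4 per device x 8 processes)
samples_per_epoch = total_samples / epochs           # ~10,000 training examples per epoch
print(round(total_samples), round(effective_batch), round(samples_per_epoch))
```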
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
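
The template above is the stock Qwen3 chat template: system/tool preamble, <think> handling for assistant turns, tool-call serialization, and an enable_thinking switch for the generation prompt. A minimal sketch of rendering a prompt with it (illustrative; assumes a transformers version that forwards extra kwargs such as enable_thinking to the template):

```python
# Illustrative sketch, not part of this commit: render a prompt with the saved chat template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft")

messages = [
    {"role": "system", "content": "Answer the multiple-choice question with a single letter."},
    {"role": "user", "content": "Where would you expect to find a jellyfish? (A) desk (B) ocean (C) garage"},
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,   # appends '<|im_start|>assistant\n'
    enable_thinking=False,        # template then emits the empty '<think>\n\n</think>\n\n' block
)
print(prompt)
```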
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-8B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen3-8B",
+ "bias": "none",
+ "corda_config": null,
+ "eva_config": null,
+ "exclude_modules": null,
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_bias": false,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "gate_proj",
+ "v_proj",
+ "up_proj",
+ "q_proj",
+ "down_proj",
+ "o_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "trainable_token_indices": null,
+ "use_dora": false,
+ "use_rslora": false
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7f888fbf570c57885834fd84a2e2c7f931f2c515a8e977b9c09d0bf96018b6a
+ size 87360584
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/added_tokens.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "</think>": 151668,
+ "</tool_call>": 151658,
+ "</tool_response>": 151666,
+ "<think>": 151667,
+ "<tool_call>": 151657,
+ "<tool_response>": 151665,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0].role == 'system' %}
+ {{- messages[0].content + '\n\n' }}
+ {%- endif %}
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+ {%- if message.content is string %}
+ {%- set content = message.content %}
+ {%- else %}
+ {%- set content = '' %}
+ {%- endif %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {%- if loop.last or (not loop.last and reasoning_content) %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if (loop.first and content) or (not loop.first) %}
+ {{- '\n' }}
+ {%- endif %}
+ {%- if tool_call.function %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {%- if tool_call.arguments is string %}
+ {{- tool_call.arguments }}
+ {%- else %}
+ {{- tool_call.arguments | tojson }}
+ {%- endif %}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- endif %}
+ {%- endif %}
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7921f271ebc6b0c0c123869d051ffea8e6670bc9dcb0662706fcc51bdf20a00
+ size 175012683
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/trainer_state.json ADDED
@@ -0,0 +1,685 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 939,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.03194888178913738,
14
+ "grad_norm": 36.33285903930664,
15
+ "learning_rate": 9.574468085106384e-07,
16
+ "loss": 6.9124,
17
+ "step": 10
18
+ },
19
+ {
20
+ "epoch": 0.06389776357827476,
21
+ "grad_norm": 37.845706939697266,
22
+ "learning_rate": 2.021276595744681e-06,
23
+ "loss": 6.6841,
24
+ "step": 20
25
+ },
26
+ {
27
+ "epoch": 0.09584664536741214,
28
+ "grad_norm": 41.69117736816406,
29
+ "learning_rate": 3.0851063829787237e-06,
30
+ "loss": 6.721,
31
+ "step": 30
32
+ },
33
+ {
34
+ "epoch": 0.12779552715654952,
35
+ "grad_norm": 50.03352737426758,
36
+ "learning_rate": 4.148936170212766e-06,
37
+ "loss": 6.1192,
38
+ "step": 40
39
+ },
40
+ {
41
+ "epoch": 0.1597444089456869,
42
+ "grad_norm": 59.23619842529297,
43
+ "learning_rate": 5.212765957446809e-06,
44
+ "loss": 4.8473,
45
+ "step": 50
46
+ },
47
+ {
48
+ "epoch": 0.19169329073482427,
49
+ "grad_norm": 5.8774261474609375,
50
+ "learning_rate": 6.276595744680851e-06,
51
+ "loss": 2.1142,
52
+ "step": 60
53
+ },
54
+ {
55
+ "epoch": 0.22364217252396165,
56
+ "grad_norm": 6.693775653839111,
57
+ "learning_rate": 7.340425531914894e-06,
58
+ "loss": 1.6119,
59
+ "step": 70
60
+ },
61
+ {
62
+ "epoch": 0.25559105431309903,
63
+ "grad_norm": 4.908555507659912,
64
+ "learning_rate": 8.404255319148937e-06,
65
+ "loss": 1.1825,
66
+ "step": 80
67
+ },
68
+ {
69
+ "epoch": 0.28753993610223644,
70
+ "grad_norm": 2.0115675926208496,
71
+ "learning_rate": 9.46808510638298e-06,
72
+ "loss": 0.5337,
73
+ "step": 90
74
+ },
75
+ {
76
+ "epoch": 0.3194888178913738,
77
+ "grad_norm": 0.8753984570503235,
78
+ "learning_rate": 9.999136119166803e-06,
79
+ "loss": 0.2692,
80
+ "step": 100
81
+ },
82
+ {
83
+ "epoch": 0.3514376996805112,
84
+ "grad_norm": 0.8379660248756409,
85
+ "learning_rate": 9.9922268634943e-06,
86
+ "loss": 0.258,
87
+ "step": 110
88
+ },
89
+ {
90
+ "epoch": 0.38338658146964855,
91
+ "grad_norm": 0.8429737687110901,
92
+ "learning_rate": 9.978417901361958e-06,
93
+ "loss": 0.234,
94
+ "step": 120
95
+ },
96
+ {
97
+ "epoch": 0.41533546325878595,
98
+ "grad_norm": 0.6800509095191956,
99
+ "learning_rate": 9.95772831799724e-06,
100
+ "loss": 0.2045,
101
+ "step": 130
102
+ },
103
+ {
104
+ "epoch": 0.4472843450479233,
105
+ "grad_norm": 1.0576311349868774,
106
+ "learning_rate": 9.930186708264902e-06,
107
+ "loss": 0.2049,
108
+ "step": 140
109
+ },
110
+ {
111
+ "epoch": 0.4792332268370607,
112
+ "grad_norm": 0.681848406791687,
113
+ "learning_rate": 9.895831137146319e-06,
114
+ "loss": 0.1901,
115
+ "step": 150
116
+ },
117
+ {
118
+ "epoch": 0.5111821086261981,
119
+ "grad_norm": 0.7110018730163574,
120
+ "learning_rate": 9.854709087130261e-06,
121
+ "loss": 0.1935,
122
+ "step": 160
123
+ },
124
+ {
125
+ "epoch": 0.5431309904153354,
126
+ "grad_norm": 0.8011160492897034,
127
+ "learning_rate": 9.80687739258782e-06,
128
+ "loss": 0.1888,
129
+ "step": 170
130
+ },
131
+ {
132
+ "epoch": 0.5750798722044729,
133
+ "grad_norm": 0.8912045359611511,
134
+ "learning_rate": 9.7524021612222e-06,
135
+ "loss": 0.194,
136
+ "step": 180
137
+ },
138
+ {
139
+ "epoch": 0.6070287539936102,
140
+ "grad_norm": 1.1133105754852295,
141
+ "learning_rate": 9.691358682701927e-06,
142
+ "loss": 0.1843,
143
+ "step": 190
144
+ },
145
+ {
146
+ "epoch": 0.6389776357827476,
147
+ "grad_norm": 1.3582327365875244,
148
+ "learning_rate": 9.623831324603755e-06,
149
+ "loss": 0.2121,
150
+ "step": 200
151
+ },
152
+ {
153
+ "epoch": 0.670926517571885,
154
+ "grad_norm": 1.2391612529754639,
155
+ "learning_rate": 9.549913415809084e-06,
156
+ "loss": 0.1797,
157
+ "step": 210
158
+ },
159
+ {
160
+ "epoch": 0.7028753993610224,
161
+ "grad_norm": 1.0479785203933716,
162
+ "learning_rate": 9.469707117515068e-06,
163
+ "loss": 0.1636,
164
+ "step": 220
165
+ },
166
+ {
167
+ "epoch": 0.7348242811501597,
168
+ "grad_norm": 1.0715245008468628,
169
+ "learning_rate": 9.383323282038632e-06,
170
+ "loss": 0.1693,
171
+ "step": 230
172
+ },
173
+ {
174
+ "epoch": 0.7667731629392971,
175
+ "grad_norm": 0.9121628403663635,
176
+ "learning_rate": 9.29088129960862e-06,
177
+ "loss": 0.1296,
178
+ "step": 240
179
+ },
180
+ {
181
+ "epoch": 0.7987220447284346,
182
+ "grad_norm": 1.2252585887908936,
183
+ "learning_rate": 9.192508933357753e-06,
184
+ "loss": 0.1393,
185
+ "step": 250
186
+ },
187
+ {
188
+ "epoch": 0.8306709265175719,
189
+ "grad_norm": 1.1432126760482788,
190
+ "learning_rate": 9.088342142742493e-06,
191
+ "loss": 0.1796,
192
+ "step": 260
193
+ },
194
+ {
195
+ "epoch": 0.8626198083067093,
196
+ "grad_norm": 1.1106261014938354,
197
+ "learning_rate": 8.978524895634842e-06,
198
+ "loss": 0.1672,
199
+ "step": 270
200
+ },
201
+ {
202
+ "epoch": 0.8945686900958466,
203
+ "grad_norm": 1.1917647123336792,
204
+ "learning_rate": 8.86320896934581e-06,
205
+ "loss": 0.1813,
206
+ "step": 280
207
+ },
208
+ {
209
+ "epoch": 0.9265175718849841,
210
+ "grad_norm": 1.1675026416778564,
211
+ "learning_rate": 8.742553740855507e-06,
212
+ "loss": 0.1847,
213
+ "step": 290
214
+ },
215
+ {
216
+ "epoch": 0.9584664536741214,
217
+ "grad_norm": 2.2532408237457275,
218
+ "learning_rate": 8.616725966539831e-06,
219
+ "loss": 0.1642,
220
+ "step": 300
221
+ },
222
+ {
223
+ "epoch": 0.9904153354632588,
224
+ "grad_norm": 1.000963807106018,
225
+ "learning_rate": 8.485899551698166e-06,
226
+ "loss": 0.1793,
227
+ "step": 310
228
+ },
229
+ {
230
+ "epoch": 1.0223642172523961,
231
+ "grad_norm": 0.7248425483703613,
232
+ "learning_rate": 8.350255310200611e-06,
233
+ "loss": 0.1445,
234
+ "step": 320
235
+ },
236
+ {
237
+ "epoch": 1.0543130990415335,
238
+ "grad_norm": 0.936930775642395,
239
+ "learning_rate": 8.209980714586955e-06,
240
+ "loss": 0.1495,
241
+ "step": 330
242
+ },
243
+ {
244
+ "epoch": 1.0862619808306708,
245
+ "grad_norm": 0.8912279009819031,
246
+ "learning_rate": 8.065269636962765e-06,
247
+ "loss": 0.1478,
248
+ "step": 340
249
+ },
250
+ {
251
+ "epoch": 1.1182108626198084,
252
+ "grad_norm": 1.2984365224838257,
253
+ "learning_rate": 7.916322081050708e-06,
254
+ "loss": 0.169,
255
+ "step": 350
256
+ },
257
+ {
258
+ "epoch": 1.1501597444089458,
259
+ "grad_norm": 0.9977064728736877,
260
+ "learning_rate": 7.76334390576742e-06,
261
+ "loss": 0.1555,
262
+ "step": 360
263
+ },
264
+ {
265
+ "epoch": 1.182108626198083,
266
+ "grad_norm": 0.8419762849807739,
267
+ "learning_rate": 7.60654654070796e-06,
268
+ "loss": 0.1617,
269
+ "step": 370
270
+ },
271
+ {
272
+ "epoch": 1.2140575079872205,
273
+ "grad_norm": 1.5433464050292969,
274
+ "learning_rate": 7.446146693931111e-06,
275
+ "loss": 0.1495,
276
+ "step": 380
277
+ },
278
+ {
279
+ "epoch": 1.2460063897763578,
280
+ "grad_norm": 0.7475736737251282,
281
+ "learning_rate": 7.282366052449351e-06,
282
+ "loss": 0.1339,
283
+ "step": 390
284
+ },
285
+ {
286
+ "epoch": 1.2779552715654952,
287
+ "grad_norm": 1.4355934858322144,
288
+ "learning_rate": 7.115430975837457e-06,
289
+ "loss": 0.1587,
290
+ "step": 400
291
+ },
292
+ {
293
+ "epoch": 1.3099041533546325,
294
+ "grad_norm": 1.2254966497421265,
295
+ "learning_rate": 6.945572183383229e-06,
296
+ "loss": 0.1347,
297
+ "step": 410
298
+ },
299
+ {
300
+ "epoch": 1.34185303514377,
301
+ "grad_norm": 0.810607373714447,
302
+ "learning_rate": 6.773024435212678e-06,
303
+ "loss": 0.1292,
304
+ "step": 420
305
+ },
306
+ {
307
+ "epoch": 1.3738019169329074,
308
+ "grad_norm": 1.0503230094909668,
309
+ "learning_rate": 6.598026207830428e-06,
310
+ "loss": 0.1636,
311
+ "step": 430
312
+ },
313
+ {
314
+ "epoch": 1.4057507987220448,
315
+ "grad_norm": 1.5683962106704712,
316
+ "learning_rate": 6.4208193645237314e-06,
317
+ "loss": 0.1703,
318
+ "step": 440
319
+ },
320
+ {
321
+ "epoch": 1.4376996805111821,
322
+ "grad_norm": 1.4046155214309692,
323
+ "learning_rate": 6.241648821085666e-06,
324
+ "loss": 0.1624,
325
+ "step": 450
326
+ },
327
+ {
328
+ "epoch": 1.4696485623003195,
329
+ "grad_norm": 1.0360242128372192,
330
+ "learning_rate": 6.060762207319479e-06,
331
+ "loss": 0.1499,
332
+ "step": 460
333
+ },
334
+ {
335
+ "epoch": 1.5015974440894568,
336
+ "grad_norm": 1.1383719444274902,
337
+ "learning_rate": 5.878409524791931e-06,
338
+ "loss": 0.1324,
339
+ "step": 470
340
+ },
341
+ {
342
+ "epoch": 1.5335463258785942,
343
+ "grad_norm": 1.3533704280853271,
344
+ "learning_rate": 5.694842801308651e-06,
345
+ "loss": 0.1328,
346
+ "step": 480
347
+ },
348
+ {
349
+ "epoch": 1.5654952076677318,
350
+ "grad_norm": 1.1995248794555664,
351
+ "learning_rate": 5.510315742589042e-06,
352
+ "loss": 0.1467,
353
+ "step": 490
354
+ },
355
+ {
356
+ "epoch": 1.5974440894568689,
357
+ "grad_norm": 1.4972271919250488,
358
+ "learning_rate": 5.325083381622165e-06,
359
+ "loss": 0.1569,
360
+ "step": 500
361
+ },
362
+ {
363
+ "epoch": 1.6293929712460065,
364
+ "grad_norm": 0.6726014018058777,
365
+ "learning_rate": 5.139401726188208e-06,
366
+ "loss": 0.1444,
367
+ "step": 510
368
+ },
369
+ {
370
+ "epoch": 1.6613418530351438,
371
+ "grad_norm": 0.8959933519363403,
372
+ "learning_rate": 4.953527405032723e-06,
373
+ "loss": 0.1727,
374
+ "step": 520
375
+ },
376
+ {
377
+ "epoch": 1.6932907348242812,
378
+ "grad_norm": 1.0952670574188232,
379
+ "learning_rate": 4.767717313182611e-06,
380
+ "loss": 0.1548,
381
+ "step": 530
382
+ },
383
+ {
384
+ "epoch": 1.7252396166134185,
385
+ "grad_norm": 1.1545971632003784,
386
+ "learning_rate": 4.582228256894093e-06,
387
+ "loss": 0.1335,
388
+ "step": 540
389
+ },
390
+ {
391
+ "epoch": 1.7571884984025559,
392
+ "grad_norm": 1.5466020107269287,
393
+ "learning_rate": 4.397316598723385e-06,
394
+ "loss": 0.168,
395
+ "step": 550
396
+ },
397
+ {
398
+ "epoch": 1.7891373801916934,
399
+ "grad_norm": 1.089145302772522,
400
+ "learning_rate": 4.2132379032105695e-06,
401
+ "loss": 0.1604,
402
+ "step": 560
403
+ },
404
+ {
405
+ "epoch": 1.8210862619808306,
406
+ "grad_norm": 1.0096254348754883,
407
+ "learning_rate": 4.030246583666437e-06,
408
+ "loss": 0.1418,
409
+ "step": 570
410
+ },
411
+ {
412
+ "epoch": 1.8530351437699681,
413
+ "grad_norm": 0.9796081781387329,
414
+ "learning_rate": 3.848595550550401e-06,
415
+ "loss": 0.1403,
416
+ "step": 580
417
+ },
418
+ {
419
+ "epoch": 1.8849840255591053,
420
+ "grad_norm": 1.1617341041564941,
421
+ "learning_rate": 3.668535861925509e-06,
422
+ "loss": 0.1433,
423
+ "step": 590
424
+ },
425
+ {
426
+ "epoch": 1.9169329073482428,
427
+ "grad_norm": 0.7589540481567383,
428
+ "learning_rate": 3.4903163764736104e-06,
429
+ "loss": 0.1388,
430
+ "step": 600
431
+ },
432
+ {
433
+ "epoch": 1.9488817891373802,
434
+ "grad_norm": 0.7963077425956726,
435
+ "learning_rate": 3.314183409550293e-06,
436
+ "loss": 0.1251,
437
+ "step": 610
438
+ },
439
+ {
440
+ "epoch": 1.9808306709265175,
441
+ "grad_norm": 1.1559962034225464,
442
+ "learning_rate": 3.140380392754901e-06,
443
+ "loss": 0.1276,
444
+ "step": 620
445
+ },
446
+ {
447
+ "epoch": 2.012779552715655,
448
+ "grad_norm": 0.6292369365692139,
449
+ "learning_rate": 2.969147537486175e-06,
450
+ "loss": 0.1428,
451
+ "step": 630
452
+ },
453
+ {
454
+ "epoch": 2.0447284345047922,
455
+ "grad_norm": 1.1892845630645752,
456
+ "learning_rate": 2.800721502948506e-06,
457
+ "loss": 0.1454,
458
+ "step": 640
459
+ },
460
+ {
461
+ "epoch": 2.07667731629393,
462
+ "grad_norm": 0.8596490621566772,
463
+ "learning_rate": 2.635335069067617e-06,
464
+ "loss": 0.1368,
465
+ "step": 650
466
+ },
467
+ {
468
+ "epoch": 2.108626198083067,
469
+ "grad_norm": 0.8156671524047852,
470
+ "learning_rate": 2.4732168147677927e-06,
471
+ "loss": 0.1159,
472
+ "step": 660
473
+ },
474
+ {
475
+ "epoch": 2.1405750798722045,
476
+ "grad_norm": 1.977015733718872,
477
+ "learning_rate": 2.314590802055232e-06,
478
+ "loss": 0.1417,
479
+ "step": 670
480
+ },
481
+ {
482
+ "epoch": 2.1725239616613417,
483
+ "grad_norm": 1.2084506750106812,
484
+ "learning_rate": 2.159676266344222e-06,
485
+ "loss": 0.1242,
486
+ "step": 680
487
+ },
488
+ {
489
+ "epoch": 2.2044728434504792,
490
+ "grad_norm": 1.1458317041397095,
491
+ "learning_rate": 2.0086873134540626e-06,
492
+ "loss": 0.1095,
493
+ "step": 690
494
+ },
495
+ {
496
+ "epoch": 2.236421725239617,
497
+ "grad_norm": 1.8264726400375366,
498
+ "learning_rate": 1.8618326236955908e-06,
499
+ "loss": 0.155,
500
+ "step": 700
501
+ },
502
+ {
503
+ "epoch": 2.268370607028754,
504
+ "grad_norm": 1.4171464443206787,
505
+ "learning_rate": 1.7193151634562071e-06,
506
+ "loss": 0.1258,
507
+ "step": 710
508
+ },
509
+ {
510
+ "epoch": 2.3003194888178915,
511
+ "grad_norm": 1.4613436460494995,
512
+ "learning_rate": 1.581331904682089e-06,
513
+ "loss": 0.1518,
514
+ "step": 720
515
+ },
516
+ {
517
+ "epoch": 2.3322683706070286,
518
+ "grad_norm": 1.093307614326477,
519
+ "learning_rate": 1.4480735526452427e-06,
520
+ "loss": 0.1299,
521
+ "step": 730
522
+ },
523
+ {
524
+ "epoch": 2.364217252396166,
525
+ "grad_norm": 1.374348521232605,
526
+ "learning_rate": 1.319724282371664e-06,
527
+ "loss": 0.147,
528
+ "step": 740
529
+ },
530
+ {
531
+ "epoch": 2.3961661341853033,
532
+ "grad_norm": 1.2374401092529297,
533
+ "learning_rate": 1.1964614840949002e-06,
534
+ "loss": 0.1209,
535
+ "step": 750
536
+ },
537
+ {
538
+ "epoch": 2.428115015974441,
539
+ "grad_norm": 1.249546766281128,
540
+ "learning_rate": 1.078455518086784e-06,
541
+ "loss": 0.115,
542
+ "step": 760
543
+ },
544
+ {
545
+ "epoch": 2.460063897763578,
546
+ "grad_norm": 1.0231386423110962,
547
+ "learning_rate": 9.658694792042284e-07,
548
+ "loss": 0.1366,
549
+ "step": 770
550
+ },
551
+ {
552
+ "epoch": 2.4920127795527156,
553
+ "grad_norm": 1.1464241743087769,
554
+ "learning_rate": 8.58858971477457e-07,
555
+ "loss": 0.1184,
556
+ "step": 780
557
+ },
558
+ {
559
+ "epoch": 2.523961661341853,
560
+ "grad_norm": 0.9766585230827332,
561
+ "learning_rate": 7.575718930512516e-07,
562
+ "loss": 0.1227,
563
+ "step": 790
564
+ },
565
+ {
566
+ "epoch": 2.5559105431309903,
567
+ "grad_norm": 1.1977977752685547,
568
+ "learning_rate": 6.621482317764105e-07,
569
+ "loss": 0.1092,
570
+ "step": 800
571
+ },
572
+ {
573
+ "epoch": 2.587859424920128,
574
+ "grad_norm": 1.6657644510269165,
575
+ "learning_rate": 5.727198717339511e-07,
576
+ "loss": 0.1475,
577
+ "step": 810
578
+ },
579
+ {
580
+ "epoch": 2.619808306709265,
581
+ "grad_norm": 1.3672709465026855,
582
+ "learning_rate": 4.894104109594466e-07,
583
+ "loss": 0.1242,
584
+ "step": 820
585
+ },
586
+ {
587
+ "epoch": 2.6517571884984026,
588
+ "grad_norm": 1.7629551887512207,
589
+ "learning_rate": 4.123349906194357e-07,
590
+ "loss": 0.1389,
591
+ "step": 830
592
+ },
593
+ {
594
+ "epoch": 2.68370607028754,
595
+ "grad_norm": 0.8762993812561035,
596
+ "learning_rate": 3.416001358759635e-07,
597
+ "loss": 0.1091,
598
+ "step": 840
599
+ },
600
+ {
601
+ "epoch": 2.7156549520766773,
602
+ "grad_norm": 1.4116370677947998,
603
+ "learning_rate": 2.7730360865923954e-07,
604
+ "loss": 0.1272,
605
+ "step": 850
606
+ },
607
+ {
608
+ "epoch": 2.747603833865815,
609
+ "grad_norm": 1.2336490154266357,
610
+ "learning_rate": 2.1953427255185122e-07,
611
+ "loss": 0.1056,
612
+ "step": 860
613
+ },
614
+ {
615
+ "epoch": 2.779552715654952,
616
+ "grad_norm": 1.4972121715545654,
617
+ "learning_rate": 1.6837196997130434e-07,
618
+ "loss": 0.1287,
619
+ "step": 870
620
+ },
621
+ {
622
+ "epoch": 2.8115015974440896,
623
+ "grad_norm": 1.169806957244873,
624
+ "learning_rate": 1.2388741182062348e-07,
625
+ "loss": 0.1094,
626
+ "step": 880
627
+ },
628
+ {
629
+ "epoch": 2.8434504792332267,
630
+ "grad_norm": 1.0118532180786133,
631
+ "learning_rate": 8.614207975952083e-08,
632
+ "loss": 0.1464,
633
+ "step": 890
634
+ },
635
+ {
636
+ "epoch": 2.8753993610223643,
637
+ "grad_norm": 1.9891964197158813,
638
+ "learning_rate": 5.518814123121885e-08,
639
+ "loss": 0.1388,
640
+ "step": 900
641
+ },
642
+ {
643
+ "epoch": 2.9073482428115014,
644
+ "grad_norm": 1.1589871644973755,
645
+ "learning_rate": 3.10683773623488e-08,
646
+ "loss": 0.1236,
647
+ "step": 910
648
+ },
649
+ {
650
+ "epoch": 2.939297124600639,
651
+ "grad_norm": 1.1757524013519287,
652
+ "learning_rate": 1.3816123835588835e-08,
653
+ "loss": 0.1044,
654
+ "step": 920
655
+ },
656
+ {
657
+ "epoch": 2.9712460063897765,
658
+ "grad_norm": 1.4213329553604126,
659
+ "learning_rate": 3.4552248167507576e-09,
660
+ "loss": 0.1423,
661
+ "step": 930
662
+ }
663
+ ],
664
+ "logging_steps": 10,
665
+ "max_steps": 939,
666
+ "num_input_tokens_seen": 0,
667
+ "num_train_epochs": 3,
668
+ "save_steps": 2000,
669
+ "stateful_callbacks": {
670
+ "TrainerControl": {
671
+ "args": {
672
+ "should_epoch_stop": false,
673
+ "should_evaluate": false,
674
+ "should_log": false,
675
+ "should_save": true,
676
+ "should_training_stop": true
677
+ },
678
+ "attributes": {}
679
+ }
680
+ },
681
+ "total_flos": 1.8033509624132403e+17,
682
+ "train_batch_size": 4,
683
+ "trial_name": null,
684
+ "trial_params": null
685
+ }
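
The log_history above (one entry every 10 steps) is the data behind training_loss.png in the same folder. A minimal sketch of re-plotting it (illustrative, not part of this commit; assumes matplotlib is available):

```python
# Illustrative sketch: re-plot the loss curve recorded in trainer_state.json.
import json
import matplotlib.pyplot as plt

path = "csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/trainer_state.json"
with open(path) as f:
    state = json.load(f)

entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in entries]
losses = [e["loss"] for e in entries]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("csqa qwen3_8b all_variants LoRA SFT")
plt.savefig("training_loss_replot.png")
```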
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb13307a71acd8fe81861d94ad54ab689df773318809eed3cbe794b4492dae4
+ size 11422654
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/tokenizer_config.json ADDED
@@ -0,0 +1,240 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ }
213
+ },
214
+ "additional_special_tokens": [
215
+ "<|im_start|>",
216
+ "<|im_end|>",
217
+ "<|object_ref_start|>",
218
+ "<|object_ref_end|>",
219
+ "<|box_start|>",
220
+ "<|box_end|>",
221
+ "<|quad_start|>",
222
+ "<|quad_end|>",
223
+ "<|vision_start|>",
224
+ "<|vision_end|>",
225
+ "<|vision_pad|>",
226
+ "<|image_pad|>",
227
+ "<|video_pad|>"
228
+ ],
229
+ "bos_token": null,
230
+ "clean_up_tokenization_spaces": false,
231
+ "eos_token": "<|im_end|>",
232
+ "errors": "replace",
233
+ "extra_special_tokens": {},
234
+ "model_max_length": 131072,
235
+ "pad_token": "<|endoftext|>",
236
+ "padding_side": "right",
237
+ "split_special_tokens": false,
238
+ "tokenizer_class": "Qwen2Tokenizer",
239
+ "unk_token": null
240
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/trainer_log.jsonl ADDED
@@ -0,0 +1,94 @@
1
+ {"current_steps": 10, "total_steps": 939, "loss": 6.9124, "lr": 9.574468085106384e-07, "epoch": 0.03194888178913738, "percentage": 1.06, "elapsed_time": "0:00:05", "remaining_time": "0:08:59"}
2
+ {"current_steps": 20, "total_steps": 939, "loss": 6.6841, "lr": 2.021276595744681e-06, "epoch": 0.06389776357827476, "percentage": 2.13, "elapsed_time": "0:00:10", "remaining_time": "0:08:07"}
3
+ {"current_steps": 30, "total_steps": 939, "loss": 6.721, "lr": 3.0851063829787237e-06, "epoch": 0.09584664536741214, "percentage": 3.19, "elapsed_time": "0:00:15", "remaining_time": "0:07:48"}
4
+ {"current_steps": 40, "total_steps": 939, "loss": 6.1192, "lr": 4.148936170212766e-06, "epoch": 0.12779552715654952, "percentage": 4.26, "elapsed_time": "0:00:20", "remaining_time": "0:07:34"}
5
+ {"current_steps": 50, "total_steps": 939, "loss": 4.8473, "lr": 5.212765957446809e-06, "epoch": 0.1597444089456869, "percentage": 5.32, "elapsed_time": "0:00:24", "remaining_time": "0:07:23"}
6
+ {"current_steps": 60, "total_steps": 939, "loss": 2.1142, "lr": 6.276595744680851e-06, "epoch": 0.19169329073482427, "percentage": 6.39, "elapsed_time": "0:00:29", "remaining_time": "0:07:15"}
7
+ {"current_steps": 70, "total_steps": 939, "loss": 1.6119, "lr": 7.340425531914894e-06, "epoch": 0.22364217252396165, "percentage": 7.45, "elapsed_time": "0:00:34", "remaining_time": "0:07:07"}
8
+ {"current_steps": 80, "total_steps": 939, "loss": 1.1825, "lr": 8.404255319148937e-06, "epoch": 0.25559105431309903, "percentage": 8.52, "elapsed_time": "0:00:39", "remaining_time": "0:07:01"}
9
+ {"current_steps": 90, "total_steps": 939, "loss": 0.5337, "lr": 9.46808510638298e-06, "epoch": 0.28753993610223644, "percentage": 9.58, "elapsed_time": "0:00:44", "remaining_time": "0:06:55"}
10
+ {"current_steps": 100, "total_steps": 939, "loss": 0.2692, "lr": 9.999136119166803e-06, "epoch": 0.3194888178913738, "percentage": 10.65, "elapsed_time": "0:00:48", "remaining_time": "0:06:48"}
11
+ {"current_steps": 110, "total_steps": 939, "loss": 0.258, "lr": 9.9922268634943e-06, "epoch": 0.3514376996805112, "percentage": 11.71, "elapsed_time": "0:00:53", "remaining_time": "0:06:42"}
12
+ {"current_steps": 120, "total_steps": 939, "loss": 0.234, "lr": 9.978417901361958e-06, "epoch": 0.38338658146964855, "percentage": 12.78, "elapsed_time": "0:00:58", "remaining_time": "0:06:37"}
13
+ {"current_steps": 130, "total_steps": 939, "loss": 0.2045, "lr": 9.95772831799724e-06, "epoch": 0.41533546325878595, "percentage": 13.84, "elapsed_time": "0:01:02", "remaining_time": "0:06:32"}
14
+ {"current_steps": 140, "total_steps": 939, "loss": 0.2049, "lr": 9.930186708264902e-06, "epoch": 0.4472843450479233, "percentage": 14.91, "elapsed_time": "0:01:07", "remaining_time": "0:06:26"}
15
+ {"current_steps": 150, "total_steps": 939, "loss": 0.1901, "lr": 9.895831137146319e-06, "epoch": 0.4792332268370607, "percentage": 15.97, "elapsed_time": "0:01:12", "remaining_time": "0:06:21"}
16
+ {"current_steps": 160, "total_steps": 939, "loss": 0.1935, "lr": 9.854709087130261e-06, "epoch": 0.5111821086261981, "percentage": 17.04, "elapsed_time": "0:01:17", "remaining_time": "0:06:16"}
17
+ {"current_steps": 170, "total_steps": 939, "loss": 0.1888, "lr": 9.80687739258782e-06, "epoch": 0.5431309904153354, "percentage": 18.1, "elapsed_time": "0:01:22", "remaining_time": "0:06:11"}
18
+ {"current_steps": 180, "total_steps": 939, "loss": 0.194, "lr": 9.7524021612222e-06, "epoch": 0.5750798722044729, "percentage": 19.17, "elapsed_time": "0:01:26", "remaining_time": "0:06:05"}
19
+ {"current_steps": 190, "total_steps": 939, "loss": 0.1843, "lr": 9.691358682701927e-06, "epoch": 0.6070287539936102, "percentage": 20.23, "elapsed_time": "0:01:31", "remaining_time": "0:06:00"}
20
+ {"current_steps": 200, "total_steps": 939, "loss": 0.2121, "lr": 9.623831324603755e-06, "epoch": 0.6389776357827476, "percentage": 21.3, "elapsed_time": "0:01:36", "remaining_time": "0:05:55"}
21
+ {"current_steps": 210, "total_steps": 939, "loss": 0.1797, "lr": 9.549913415809084e-06, "epoch": 0.670926517571885, "percentage": 22.36, "elapsed_time": "0:01:41", "remaining_time": "0:05:50"}
22
+ {"current_steps": 220, "total_steps": 939, "loss": 0.1636, "lr": 9.469707117515068e-06, "epoch": 0.7028753993610224, "percentage": 23.43, "elapsed_time": "0:01:45", "remaining_time": "0:05:45"}
23
+ {"current_steps": 230, "total_steps": 939, "loss": 0.1693, "lr": 9.383323282038632e-06, "epoch": 0.7348242811501597, "percentage": 24.49, "elapsed_time": "0:01:50", "remaining_time": "0:05:41"}
24
+ {"current_steps": 240, "total_steps": 939, "loss": 0.1296, "lr": 9.29088129960862e-06, "epoch": 0.7667731629392971, "percentage": 25.56, "elapsed_time": "0:01:55", "remaining_time": "0:05:36"}
25
+ {"current_steps": 250, "total_steps": 939, "loss": 0.1393, "lr": 9.192508933357753e-06, "epoch": 0.7987220447284346, "percentage": 26.62, "elapsed_time": "0:02:00", "remaining_time": "0:05:31"}
26
+ {"current_steps": 260, "total_steps": 939, "loss": 0.1796, "lr": 9.088342142742493e-06, "epoch": 0.8306709265175719, "percentage": 27.69, "elapsed_time": "0:02:04", "remaining_time": "0:05:26"}
27
+ {"current_steps": 270, "total_steps": 939, "loss": 0.1672, "lr": 8.978524895634842e-06, "epoch": 0.8626198083067093, "percentage": 28.75, "elapsed_time": "0:02:09", "remaining_time": "0:05:21"}
28
+ {"current_steps": 280, "total_steps": 939, "loss": 0.1813, "lr": 8.86320896934581e-06, "epoch": 0.8945686900958466, "percentage": 29.82, "elapsed_time": "0:02:14", "remaining_time": "0:05:16"}
29
+ {"current_steps": 290, "total_steps": 939, "loss": 0.1847, "lr": 8.742553740855507e-06, "epoch": 0.9265175718849841, "percentage": 30.88, "elapsed_time": "0:02:19", "remaining_time": "0:05:11"}
30
+ {"current_steps": 300, "total_steps": 939, "loss": 0.1642, "lr": 8.616725966539831e-06, "epoch": 0.9584664536741214, "percentage": 31.95, "elapsed_time": "0:02:23", "remaining_time": "0:05:06"}
31
+ {"current_steps": 310, "total_steps": 939, "loss": 0.1793, "lr": 8.485899551698166e-06, "epoch": 0.9904153354632588, "percentage": 33.01, "elapsed_time": "0:02:28", "remaining_time": "0:05:01"}
32
+ {"current_steps": 320, "total_steps": 939, "loss": 0.1445, "lr": 8.350255310200611e-06, "epoch": 1.0223642172523961, "percentage": 34.08, "elapsed_time": "0:02:33", "remaining_time": "0:04:57"}
33
+ {"current_steps": 330, "total_steps": 939, "loss": 0.1495, "lr": 8.209980714586955e-06, "epoch": 1.0543130990415335, "percentage": 35.14, "elapsed_time": "0:02:38", "remaining_time": "0:04:52"}
34
+ {"current_steps": 340, "total_steps": 939, "loss": 0.1478, "lr": 8.065269636962765e-06, "epoch": 1.0862619808306708, "percentage": 36.21, "elapsed_time": "0:02:43", "remaining_time": "0:04:47"}
35
+ {"current_steps": 350, "total_steps": 939, "loss": 0.169, "lr": 7.916322081050708e-06, "epoch": 1.1182108626198084, "percentage": 37.27, "elapsed_time": "0:02:47", "remaining_time": "0:04:42"}
36
+ {"current_steps": 360, "total_steps": 939, "loss": 0.1555, "lr": 7.76334390576742e-06, "epoch": 1.1501597444089458, "percentage": 38.34, "elapsed_time": "0:02:52", "remaining_time": "0:04:37"}
37
+ {"current_steps": 370, "total_steps": 939, "loss": 0.1617, "lr": 7.60654654070796e-06, "epoch": 1.182108626198083, "percentage": 39.4, "elapsed_time": "0:02:57", "remaining_time": "0:04:32"}
38
+ {"current_steps": 380, "total_steps": 939, "loss": 0.1495, "lr": 7.446146693931111e-06, "epoch": 1.2140575079872205, "percentage": 40.47, "elapsed_time": "0:03:01", "remaining_time": "0:04:27"}
39
+ {"current_steps": 390, "total_steps": 939, "loss": 0.1339, "lr": 7.282366052449351e-06, "epoch": 1.2460063897763578, "percentage": 41.53, "elapsed_time": "0:03:06", "remaining_time": "0:04:22"}
40
+ {"current_steps": 400, "total_steps": 939, "loss": 0.1587, "lr": 7.115430975837457e-06, "epoch": 1.2779552715654952, "percentage": 42.6, "elapsed_time": "0:03:11", "remaining_time": "0:04:17"}
41
+ {"current_steps": 410, "total_steps": 939, "loss": 0.1347, "lr": 6.945572183383229e-06, "epoch": 1.3099041533546325, "percentage": 43.66, "elapsed_time": "0:03:16", "remaining_time": "0:04:13"}
42
+ {"current_steps": 420, "total_steps": 939, "loss": 0.1292, "lr": 6.773024435212678e-06, "epoch": 1.34185303514377, "percentage": 44.73, "elapsed_time": "0:03:20", "remaining_time": "0:04:08"}
43
+ {"current_steps": 430, "total_steps": 939, "loss": 0.1636, "lr": 6.598026207830428e-06, "epoch": 1.3738019169329074, "percentage": 45.79, "elapsed_time": "0:03:25", "remaining_time": "0:04:03"}
44
+ {"current_steps": 440, "total_steps": 939, "loss": 0.1703, "lr": 6.4208193645237314e-06, "epoch": 1.4057507987220448, "percentage": 46.86, "elapsed_time": "0:03:30", "remaining_time": "0:03:58"}
45
+ {"current_steps": 450, "total_steps": 939, "loss": 0.1624, "lr": 6.241648821085666e-06, "epoch": 1.4376996805111821, "percentage": 47.92, "elapsed_time": "0:03:35", "remaining_time": "0:03:53"}
46
+ {"current_steps": 460, "total_steps": 939, "loss": 0.1499, "lr": 6.060762207319479e-06, "epoch": 1.4696485623003195, "percentage": 48.99, "elapsed_time": "0:03:39", "remaining_time": "0:03:48"}
47
+ {"current_steps": 470, "total_steps": 939, "loss": 0.1324, "lr": 5.878409524791931e-06, "epoch": 1.5015974440894568, "percentage": 50.05, "elapsed_time": "0:03:44", "remaining_time": "0:03:44"}
48
+ {"current_steps": 480, "total_steps": 939, "loss": 0.1328, "lr": 5.694842801308651e-06, "epoch": 1.5335463258785942, "percentage": 51.12, "elapsed_time": "0:03:49", "remaining_time": "0:03:39"}
49
+ {"current_steps": 490, "total_steps": 939, "loss": 0.1467, "lr": 5.510315742589042e-06, "epoch": 1.5654952076677318, "percentage": 52.18, "elapsed_time": "0:03:53", "remaining_time": "0:03:34"}
50
+ {"current_steps": 500, "total_steps": 939, "loss": 0.1569, "lr": 5.325083381622165e-06, "epoch": 1.5974440894568689, "percentage": 53.25, "elapsed_time": "0:03:58", "remaining_time": "0:03:29"}
51
+ {"current_steps": 510, "total_steps": 939, "loss": 0.1444, "lr": 5.139401726188208e-06, "epoch": 1.6293929712460065, "percentage": 54.31, "elapsed_time": "0:04:03", "remaining_time": "0:03:24"}
52
+ {"current_steps": 520, "total_steps": 939, "loss": 0.1727, "lr": 4.953527405032723e-06, "epoch": 1.6613418530351438, "percentage": 55.38, "elapsed_time": "0:04:08", "remaining_time": "0:03:20"}
53
+ {"current_steps": 530, "total_steps": 939, "loss": 0.1548, "lr": 4.767717313182611e-06, "epoch": 1.6932907348242812, "percentage": 56.44, "elapsed_time": "0:04:13", "remaining_time": "0:03:15"}
54
+ {"current_steps": 540, "total_steps": 939, "loss": 0.1335, "lr": 4.582228256894093e-06, "epoch": 1.7252396166134185, "percentage": 57.51, "elapsed_time": "0:04:17", "remaining_time": "0:03:10"}
55
+ {"current_steps": 550, "total_steps": 939, "loss": 0.168, "lr": 4.397316598723385e-06, "epoch": 1.7571884984025559, "percentage": 58.57, "elapsed_time": "0:04:22", "remaining_time": "0:03:05"}
56
+ {"current_steps": 560, "total_steps": 939, "loss": 0.1604, "lr": 4.2132379032105695e-06, "epoch": 1.7891373801916934, "percentage": 59.64, "elapsed_time": "0:04:27", "remaining_time": "0:03:00"}
57
+ {"current_steps": 570, "total_steps": 939, "loss": 0.1418, "lr": 4.030246583666437e-06, "epoch": 1.8210862619808306, "percentage": 60.7, "elapsed_time": "0:04:32", "remaining_time": "0:02:56"}
58
+ {"current_steps": 580, "total_steps": 939, "loss": 0.1403, "lr": 3.848595550550401e-06, "epoch": 1.8530351437699681, "percentage": 61.77, "elapsed_time": "0:04:36", "remaining_time": "0:02:51"}
59
+ {"current_steps": 590, "total_steps": 939, "loss": 0.1433, "lr": 3.668535861925509e-06, "epoch": 1.8849840255591053, "percentage": 62.83, "elapsed_time": "0:04:41", "remaining_time": "0:02:46"}
60
+ {"current_steps": 600, "total_steps": 939, "loss": 0.1388, "lr": 3.4903163764736104e-06, "epoch": 1.9169329073482428, "percentage": 63.9, "elapsed_time": "0:04:46", "remaining_time": "0:02:41"}
61
+ {"current_steps": 610, "total_steps": 939, "loss": 0.1251, "lr": 3.314183409550293e-06, "epoch": 1.9488817891373802, "percentage": 64.96, "elapsed_time": "0:04:50", "remaining_time": "0:02:36"}
62
+ {"current_steps": 620, "total_steps": 939, "loss": 0.1276, "lr": 3.140380392754901e-06, "epoch": 1.9808306709265175, "percentage": 66.03, "elapsed_time": "0:04:55", "remaining_time": "0:02:32"}
63
+ {"current_steps": 630, "total_steps": 939, "loss": 0.1428, "lr": 2.969147537486175e-06, "epoch": 2.012779552715655, "percentage": 67.09, "elapsed_time": "0:05:00", "remaining_time": "0:02:27"}
64
+ {"current_steps": 640, "total_steps": 939, "loss": 0.1454, "lr": 2.800721502948506e-06, "epoch": 2.0447284345047922, "percentage": 68.16, "elapsed_time": "0:05:05", "remaining_time": "0:02:22"}
65
+ {"current_steps": 650, "total_steps": 939, "loss": 0.1368, "lr": 2.635335069067617e-06, "epoch": 2.07667731629393, "percentage": 69.22, "elapsed_time": "0:05:10", "remaining_time": "0:02:17"}
66
+ {"current_steps": 660, "total_steps": 939, "loss": 0.1159, "lr": 2.4732168147677927e-06, "epoch": 2.108626198083067, "percentage": 70.29, "elapsed_time": "0:05:14", "remaining_time": "0:02:13"}
67
+ {"current_steps": 670, "total_steps": 939, "loss": 0.1417, "lr": 2.314590802055232e-06, "epoch": 2.1405750798722045, "percentage": 71.35, "elapsed_time": "0:05:19", "remaining_time": "0:02:08"}
68
+ {"current_steps": 680, "total_steps": 939, "loss": 0.1242, "lr": 2.159676266344222e-06, "epoch": 2.1725239616613417, "percentage": 72.42, "elapsed_time": "0:05:24", "remaining_time": "0:02:03"}
69
+ {"current_steps": 690, "total_steps": 939, "loss": 0.1095, "lr": 2.0086873134540626e-06, "epoch": 2.2044728434504792, "percentage": 73.48, "elapsed_time": "0:05:29", "remaining_time": "0:01:58"}
70
+ {"current_steps": 700, "total_steps": 939, "loss": 0.155, "lr": 1.8618326236955908e-06, "epoch": 2.236421725239617, "percentage": 74.55, "elapsed_time": "0:05:33", "remaining_time": "0:01:53"}
71
+ {"current_steps": 710, "total_steps": 939, "loss": 0.1258, "lr": 1.7193151634562071e-06, "epoch": 2.268370607028754, "percentage": 75.61, "elapsed_time": "0:05:38", "remaining_time": "0:01:49"}
72
+ {"current_steps": 720, "total_steps": 939, "loss": 0.1518, "lr": 1.581331904682089e-06, "epoch": 2.3003194888178915, "percentage": 76.68, "elapsed_time": "0:05:43", "remaining_time": "0:01:44"}
73
+ {"current_steps": 730, "total_steps": 939, "loss": 0.1299, "lr": 1.4480735526452427e-06, "epoch": 2.3322683706070286, "percentage": 77.74, "elapsed_time": "0:05:48", "remaining_time": "0:01:39"}
74
+ {"current_steps": 740, "total_steps": 939, "loss": 0.147, "lr": 1.319724282371664e-06, "epoch": 2.364217252396166, "percentage": 78.81, "elapsed_time": "0:05:52", "remaining_time": "0:01:34"}
75
+ {"current_steps": 750, "total_steps": 939, "loss": 0.1209, "lr": 1.1964614840949002e-06, "epoch": 2.3961661341853033, "percentage": 79.87, "elapsed_time": "0:05:57", "remaining_time": "0:01:30"}
76
+ {"current_steps": 760, "total_steps": 939, "loss": 0.115, "lr": 1.078455518086784e-06, "epoch": 2.428115015974441, "percentage": 80.94, "elapsed_time": "0:06:02", "remaining_time": "0:01:25"}
77
+ {"current_steps": 770, "total_steps": 939, "loss": 0.1366, "lr": 9.658694792042284e-07, "epoch": 2.460063897763578, "percentage": 82.0, "elapsed_time": "0:06:06", "remaining_time": "0:01:20"}
78
+ {"current_steps": 780, "total_steps": 939, "loss": 0.1184, "lr": 8.58858971477457e-07, "epoch": 2.4920127795527156, "percentage": 83.07, "elapsed_time": "0:06:11", "remaining_time": "0:01:15"}
79
+ {"current_steps": 790, "total_steps": 939, "loss": 0.1227, "lr": 7.575718930512516e-07, "epoch": 2.523961661341853, "percentage": 84.13, "elapsed_time": "0:06:16", "remaining_time": "0:01:11"}
80
+ {"current_steps": 800, "total_steps": 939, "loss": 0.1092, "lr": 6.621482317764105e-07, "epoch": 2.5559105431309903, "percentage": 85.2, "elapsed_time": "0:06:21", "remaining_time": "0:01:06"}
81
+ {"current_steps": 810, "total_steps": 939, "loss": 0.1475, "lr": 5.727198717339511e-07, "epoch": 2.587859424920128, "percentage": 86.26, "elapsed_time": "0:06:26", "remaining_time": "0:01:01"}
82
+ {"current_steps": 820, "total_steps": 939, "loss": 0.1242, "lr": 4.894104109594466e-07, "epoch": 2.619808306709265, "percentage": 87.33, "elapsed_time": "0:06:30", "remaining_time": "0:00:56"}
83
+ {"current_steps": 830, "total_steps": 939, "loss": 0.1389, "lr": 4.123349906194357e-07, "epoch": 2.6517571884984026, "percentage": 88.39, "elapsed_time": "0:06:35", "remaining_time": "0:00:51"}
84
+ {"current_steps": 840, "total_steps": 939, "loss": 0.1091, "lr": 3.416001358759635e-07, "epoch": 2.68370607028754, "percentage": 89.46, "elapsed_time": "0:06:40", "remaining_time": "0:00:47"}
85
+ {"current_steps": 850, "total_steps": 939, "loss": 0.1272, "lr": 2.7730360865923954e-07, "epoch": 2.7156549520766773, "percentage": 90.52, "elapsed_time": "0:06:44", "remaining_time": "0:00:42"}
86
+ {"current_steps": 860, "total_steps": 939, "loss": 0.1056, "lr": 2.1953427255185122e-07, "epoch": 2.747603833865815, "percentage": 91.59, "elapsed_time": "0:06:49", "remaining_time": "0:00:37"}
87
+ {"current_steps": 870, "total_steps": 939, "loss": 0.1287, "lr": 1.6837196997130434e-07, "epoch": 2.779552715654952, "percentage": 92.65, "elapsed_time": "0:06:54", "remaining_time": "0:00:32"}
88
+ {"current_steps": 880, "total_steps": 939, "loss": 0.1094, "lr": 1.2388741182062348e-07, "epoch": 2.8115015974440896, "percentage": 93.72, "elapsed_time": "0:06:59", "remaining_time": "0:00:28"}
89
+ {"current_steps": 890, "total_steps": 939, "loss": 0.1464, "lr": 8.614207975952083e-08, "epoch": 2.8434504792332267, "percentage": 94.78, "elapsed_time": "0:07:03", "remaining_time": "0:00:23"}
90
+ {"current_steps": 900, "total_steps": 939, "loss": 0.1388, "lr": 5.518814123121885e-08, "epoch": 2.8753993610223643, "percentage": 95.85, "elapsed_time": "0:07:08", "remaining_time": "0:00:18"}
91
+ {"current_steps": 910, "total_steps": 939, "loss": 0.1236, "lr": 3.10683773623488e-08, "epoch": 2.9073482428115014, "percentage": 96.91, "elapsed_time": "0:07:13", "remaining_time": "0:00:13"}
92
+ {"current_steps": 920, "total_steps": 939, "loss": 0.1044, "lr": 1.3816123835588835e-08, "epoch": 2.939297124600639, "percentage": 97.98, "elapsed_time": "0:07:17", "remaining_time": "0:00:09"}
93
+ {"current_steps": 930, "total_steps": 939, "loss": 0.1423, "lr": 3.4552248167507576e-09, "epoch": 2.9712460063897765, "percentage": 99.04, "elapsed_time": "0:07:22", "remaining_time": "0:00:04"}
94
+ {"current_steps": 939, "total_steps": 939, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "0:07:27", "remaining_time": "0:00:00"}
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/trainer_state.json ADDED
@@ -0,0 +1,694 @@
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 939,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.03194888178913738,
14
+ "grad_norm": 36.33285903930664,
15
+ "learning_rate": 9.574468085106384e-07,
16
+ "loss": 6.9124,
17
+ "step": 10
18
+ },
19
+ {
20
+ "epoch": 0.06389776357827476,
21
+ "grad_norm": 37.845706939697266,
22
+ "learning_rate": 2.021276595744681e-06,
23
+ "loss": 6.6841,
24
+ "step": 20
25
+ },
26
+ {
27
+ "epoch": 0.09584664536741214,
28
+ "grad_norm": 41.69117736816406,
29
+ "learning_rate": 3.0851063829787237e-06,
30
+ "loss": 6.721,
31
+ "step": 30
32
+ },
33
+ {
34
+ "epoch": 0.12779552715654952,
35
+ "grad_norm": 50.03352737426758,
36
+ "learning_rate": 4.148936170212766e-06,
37
+ "loss": 6.1192,
38
+ "step": 40
39
+ },
40
+ {
41
+ "epoch": 0.1597444089456869,
42
+ "grad_norm": 59.23619842529297,
43
+ "learning_rate": 5.212765957446809e-06,
44
+ "loss": 4.8473,
45
+ "step": 50
46
+ },
47
+ {
48
+ "epoch": 0.19169329073482427,
49
+ "grad_norm": 5.8774261474609375,
50
+ "learning_rate": 6.276595744680851e-06,
51
+ "loss": 2.1142,
52
+ "step": 60
53
+ },
54
+ {
55
+ "epoch": 0.22364217252396165,
56
+ "grad_norm": 6.693775653839111,
57
+ "learning_rate": 7.340425531914894e-06,
58
+ "loss": 1.6119,
59
+ "step": 70
60
+ },
61
+ {
62
+ "epoch": 0.25559105431309903,
63
+ "grad_norm": 4.908555507659912,
64
+ "learning_rate": 8.404255319148937e-06,
65
+ "loss": 1.1825,
66
+ "step": 80
67
+ },
68
+ {
69
+ "epoch": 0.28753993610223644,
70
+ "grad_norm": 2.0115675926208496,
71
+ "learning_rate": 9.46808510638298e-06,
72
+ "loss": 0.5337,
73
+ "step": 90
74
+ },
75
+ {
76
+ "epoch": 0.3194888178913738,
77
+ "grad_norm": 0.8753984570503235,
78
+ "learning_rate": 9.999136119166803e-06,
79
+ "loss": 0.2692,
80
+ "step": 100
81
+ },
82
+ {
83
+ "epoch": 0.3514376996805112,
84
+ "grad_norm": 0.8379660248756409,
85
+ "learning_rate": 9.9922268634943e-06,
86
+ "loss": 0.258,
87
+ "step": 110
88
+ },
89
+ {
90
+ "epoch": 0.38338658146964855,
91
+ "grad_norm": 0.8429737687110901,
92
+ "learning_rate": 9.978417901361958e-06,
93
+ "loss": 0.234,
94
+ "step": 120
95
+ },
96
+ {
97
+ "epoch": 0.41533546325878595,
98
+ "grad_norm": 0.6800509095191956,
99
+ "learning_rate": 9.95772831799724e-06,
100
+ "loss": 0.2045,
101
+ "step": 130
102
+ },
103
+ {
104
+ "epoch": 0.4472843450479233,
105
+ "grad_norm": 1.0576311349868774,
106
+ "learning_rate": 9.930186708264902e-06,
107
+ "loss": 0.2049,
108
+ "step": 140
109
+ },
110
+ {
111
+ "epoch": 0.4792332268370607,
112
+ "grad_norm": 0.681848406791687,
113
+ "learning_rate": 9.895831137146319e-06,
114
+ "loss": 0.1901,
115
+ "step": 150
116
+ },
117
+ {
118
+ "epoch": 0.5111821086261981,
119
+ "grad_norm": 0.7110018730163574,
120
+ "learning_rate": 9.854709087130261e-06,
121
+ "loss": 0.1935,
122
+ "step": 160
123
+ },
124
+ {
125
+ "epoch": 0.5431309904153354,
126
+ "grad_norm": 0.8011160492897034,
127
+ "learning_rate": 9.80687739258782e-06,
128
+ "loss": 0.1888,
129
+ "step": 170
130
+ },
131
+ {
132
+ "epoch": 0.5750798722044729,
133
+ "grad_norm": 0.8912045359611511,
134
+ "learning_rate": 9.7524021612222e-06,
135
+ "loss": 0.194,
136
+ "step": 180
137
+ },
138
+ {
139
+ "epoch": 0.6070287539936102,
140
+ "grad_norm": 1.1133105754852295,
141
+ "learning_rate": 9.691358682701927e-06,
142
+ "loss": 0.1843,
143
+ "step": 190
144
+ },
145
+ {
146
+ "epoch": 0.6389776357827476,
147
+ "grad_norm": 1.3582327365875244,
148
+ "learning_rate": 9.623831324603755e-06,
149
+ "loss": 0.2121,
150
+ "step": 200
151
+ },
152
+ {
153
+ "epoch": 0.670926517571885,
154
+ "grad_norm": 1.2391612529754639,
155
+ "learning_rate": 9.549913415809084e-06,
156
+ "loss": 0.1797,
157
+ "step": 210
158
+ },
159
+ {
160
+ "epoch": 0.7028753993610224,
161
+ "grad_norm": 1.0479785203933716,
162
+ "learning_rate": 9.469707117515068e-06,
163
+ "loss": 0.1636,
164
+ "step": 220
165
+ },
166
+ {
167
+ "epoch": 0.7348242811501597,
168
+ "grad_norm": 1.0715245008468628,
169
+ "learning_rate": 9.383323282038632e-06,
170
+ "loss": 0.1693,
171
+ "step": 230
172
+ },
173
+ {
174
+ "epoch": 0.7667731629392971,
175
+ "grad_norm": 0.9121628403663635,
176
+ "learning_rate": 9.29088129960862e-06,
177
+ "loss": 0.1296,
178
+ "step": 240
179
+ },
180
+ {
181
+ "epoch": 0.7987220447284346,
182
+ "grad_norm": 1.2252585887908936,
183
+ "learning_rate": 9.192508933357753e-06,
184
+ "loss": 0.1393,
185
+ "step": 250
186
+ },
187
+ {
188
+ "epoch": 0.8306709265175719,
189
+ "grad_norm": 1.1432126760482788,
190
+ "learning_rate": 9.088342142742493e-06,
191
+ "loss": 0.1796,
192
+ "step": 260
193
+ },
194
+ {
195
+ "epoch": 0.8626198083067093,
196
+ "grad_norm": 1.1106261014938354,
197
+ "learning_rate": 8.978524895634842e-06,
198
+ "loss": 0.1672,
199
+ "step": 270
200
+ },
201
+ {
202
+ "epoch": 0.8945686900958466,
203
+ "grad_norm": 1.1917647123336792,
204
+ "learning_rate": 8.86320896934581e-06,
205
+ "loss": 0.1813,
206
+ "step": 280
207
+ },
208
+ {
209
+ "epoch": 0.9265175718849841,
210
+ "grad_norm": 1.1675026416778564,
211
+ "learning_rate": 8.742553740855507e-06,
212
+ "loss": 0.1847,
213
+ "step": 290
214
+ },
215
+ {
216
+ "epoch": 0.9584664536741214,
217
+ "grad_norm": 2.2532408237457275,
218
+ "learning_rate": 8.616725966539831e-06,
219
+ "loss": 0.1642,
220
+ "step": 300
221
+ },
222
+ {
223
+ "epoch": 0.9904153354632588,
224
+ "grad_norm": 1.000963807106018,
225
+ "learning_rate": 8.485899551698166e-06,
226
+ "loss": 0.1793,
227
+ "step": 310
228
+ },
229
+ {
230
+ "epoch": 1.0223642172523961,
231
+ "grad_norm": 0.7248425483703613,
232
+ "learning_rate": 8.350255310200611e-06,
233
+ "loss": 0.1445,
234
+ "step": 320
235
+ },
236
+ {
237
+ "epoch": 1.0543130990415335,
238
+ "grad_norm": 0.936930775642395,
239
+ "learning_rate": 8.209980714586955e-06,
240
+ "loss": 0.1495,
241
+ "step": 330
242
+ },
243
+ {
244
+ "epoch": 1.0862619808306708,
245
+ "grad_norm": 0.8912279009819031,
246
+ "learning_rate": 8.065269636962765e-06,
247
+ "loss": 0.1478,
248
+ "step": 340
249
+ },
250
+ {
251
+ "epoch": 1.1182108626198084,
252
+ "grad_norm": 1.2984365224838257,
253
+ "learning_rate": 7.916322081050708e-06,
254
+ "loss": 0.169,
255
+ "step": 350
256
+ },
257
+ {
258
+ "epoch": 1.1501597444089458,
259
+ "grad_norm": 0.9977064728736877,
260
+ "learning_rate": 7.76334390576742e-06,
261
+ "loss": 0.1555,
262
+ "step": 360
263
+ },
264
+ {
265
+ "epoch": 1.182108626198083,
266
+ "grad_norm": 0.8419762849807739,
267
+ "learning_rate": 7.60654654070796e-06,
268
+ "loss": 0.1617,
269
+ "step": 370
270
+ },
271
+ {
272
+ "epoch": 1.2140575079872205,
273
+ "grad_norm": 1.5433464050292969,
274
+ "learning_rate": 7.446146693931111e-06,
275
+ "loss": 0.1495,
276
+ "step": 380
277
+ },
278
+ {
279
+ "epoch": 1.2460063897763578,
280
+ "grad_norm": 0.7475736737251282,
281
+ "learning_rate": 7.282366052449351e-06,
282
+ "loss": 0.1339,
283
+ "step": 390
284
+ },
285
+ {
286
+ "epoch": 1.2779552715654952,
287
+ "grad_norm": 1.4355934858322144,
288
+ "learning_rate": 7.115430975837457e-06,
289
+ "loss": 0.1587,
290
+ "step": 400
291
+ },
292
+ {
293
+ "epoch": 1.3099041533546325,
294
+ "grad_norm": 1.2254966497421265,
295
+ "learning_rate": 6.945572183383229e-06,
296
+ "loss": 0.1347,
297
+ "step": 410
298
+ },
299
+ {
300
+ "epoch": 1.34185303514377,
301
+ "grad_norm": 0.810607373714447,
302
+ "learning_rate": 6.773024435212678e-06,
303
+ "loss": 0.1292,
304
+ "step": 420
305
+ },
306
+ {
307
+ "epoch": 1.3738019169329074,
308
+ "grad_norm": 1.0503230094909668,
309
+ "learning_rate": 6.598026207830428e-06,
310
+ "loss": 0.1636,
311
+ "step": 430
312
+ },
313
+ {
314
+ "epoch": 1.4057507987220448,
315
+ "grad_norm": 1.5683962106704712,
316
+ "learning_rate": 6.4208193645237314e-06,
317
+ "loss": 0.1703,
318
+ "step": 440
319
+ },
320
+ {
321
+ "epoch": 1.4376996805111821,
322
+ "grad_norm": 1.4046155214309692,
323
+ "learning_rate": 6.241648821085666e-06,
324
+ "loss": 0.1624,
325
+ "step": 450
326
+ },
327
+ {
328
+ "epoch": 1.4696485623003195,
329
+ "grad_norm": 1.0360242128372192,
330
+ "learning_rate": 6.060762207319479e-06,
331
+ "loss": 0.1499,
332
+ "step": 460
333
+ },
334
+ {
335
+ "epoch": 1.5015974440894568,
336
+ "grad_norm": 1.1383719444274902,
337
+ "learning_rate": 5.878409524791931e-06,
338
+ "loss": 0.1324,
339
+ "step": 470
340
+ },
341
+ {
342
+ "epoch": 1.5335463258785942,
343
+ "grad_norm": 1.3533704280853271,
344
+ "learning_rate": 5.694842801308651e-06,
345
+ "loss": 0.1328,
346
+ "step": 480
347
+ },
348
+ {
349
+ "epoch": 1.5654952076677318,
350
+ "grad_norm": 1.1995248794555664,
351
+ "learning_rate": 5.510315742589042e-06,
352
+ "loss": 0.1467,
353
+ "step": 490
354
+ },
355
+ {
356
+ "epoch": 1.5974440894568689,
357
+ "grad_norm": 1.4972271919250488,
358
+ "learning_rate": 5.325083381622165e-06,
359
+ "loss": 0.1569,
360
+ "step": 500
361
+ },
362
+ {
363
+ "epoch": 1.6293929712460065,
364
+ "grad_norm": 0.6726014018058777,
365
+ "learning_rate": 5.139401726188208e-06,
366
+ "loss": 0.1444,
367
+ "step": 510
368
+ },
369
+ {
370
+ "epoch": 1.6613418530351438,
371
+ "grad_norm": 0.8959933519363403,
372
+ "learning_rate": 4.953527405032723e-06,
373
+ "loss": 0.1727,
374
+ "step": 520
375
+ },
376
+ {
377
+ "epoch": 1.6932907348242812,
378
+ "grad_norm": 1.0952670574188232,
379
+ "learning_rate": 4.767717313182611e-06,
380
+ "loss": 0.1548,
381
+ "step": 530
382
+ },
383
+ {
384
+ "epoch": 1.7252396166134185,
385
+ "grad_norm": 1.1545971632003784,
386
+ "learning_rate": 4.582228256894093e-06,
387
+ "loss": 0.1335,
388
+ "step": 540
389
+ },
390
+ {
391
+ "epoch": 1.7571884984025559,
392
+ "grad_norm": 1.5466020107269287,
393
+ "learning_rate": 4.397316598723385e-06,
394
+ "loss": 0.168,
395
+ "step": 550
396
+ },
397
+ {
398
+ "epoch": 1.7891373801916934,
399
+ "grad_norm": 1.089145302772522,
400
+ "learning_rate": 4.2132379032105695e-06,
401
+ "loss": 0.1604,
402
+ "step": 560
403
+ },
404
+ {
405
+ "epoch": 1.8210862619808306,
406
+ "grad_norm": 1.0096254348754883,
407
+ "learning_rate": 4.030246583666437e-06,
408
+ "loss": 0.1418,
409
+ "step": 570
410
+ },
411
+ {
412
+ "epoch": 1.8530351437699681,
413
+ "grad_norm": 0.9796081781387329,
414
+ "learning_rate": 3.848595550550401e-06,
415
+ "loss": 0.1403,
416
+ "step": 580
417
+ },
418
+ {
419
+ "epoch": 1.8849840255591053,
420
+ "grad_norm": 1.1617341041564941,
421
+ "learning_rate": 3.668535861925509e-06,
422
+ "loss": 0.1433,
423
+ "step": 590
424
+ },
425
+ {
426
+ "epoch": 1.9169329073482428,
427
+ "grad_norm": 0.7589540481567383,
428
+ "learning_rate": 3.4903163764736104e-06,
429
+ "loss": 0.1388,
430
+ "step": 600
431
+ },
432
+ {
433
+ "epoch": 1.9488817891373802,
434
+ "grad_norm": 0.7963077425956726,
435
+ "learning_rate": 3.314183409550293e-06,
436
+ "loss": 0.1251,
437
+ "step": 610
438
+ },
439
+ {
440
+ "epoch": 1.9808306709265175,
441
+ "grad_norm": 1.1559962034225464,
442
+ "learning_rate": 3.140380392754901e-06,
443
+ "loss": 0.1276,
444
+ "step": 620
445
+ },
446
+ {
447
+ "epoch": 2.012779552715655,
448
+ "grad_norm": 0.6292369365692139,
449
+ "learning_rate": 2.969147537486175e-06,
450
+ "loss": 0.1428,
451
+ "step": 630
452
+ },
453
+ {
454
+ "epoch": 2.0447284345047922,
455
+ "grad_norm": 1.1892845630645752,
456
+ "learning_rate": 2.800721502948506e-06,
457
+ "loss": 0.1454,
458
+ "step": 640
459
+ },
460
+ {
461
+ "epoch": 2.07667731629393,
462
+ "grad_norm": 0.8596490621566772,
463
+ "learning_rate": 2.635335069067617e-06,
464
+ "loss": 0.1368,
465
+ "step": 650
466
+ },
467
+ {
468
+ "epoch": 2.108626198083067,
469
+ "grad_norm": 0.8156671524047852,
470
+ "learning_rate": 2.4732168147677927e-06,
471
+ "loss": 0.1159,
472
+ "step": 660
473
+ },
474
+ {
475
+ "epoch": 2.1405750798722045,
476
+ "grad_norm": 1.977015733718872,
477
+ "learning_rate": 2.314590802055232e-06,
478
+ "loss": 0.1417,
479
+ "step": 670
480
+ },
481
+ {
482
+ "epoch": 2.1725239616613417,
483
+ "grad_norm": 1.2084506750106812,
484
+ "learning_rate": 2.159676266344222e-06,
485
+ "loss": 0.1242,
486
+ "step": 680
487
+ },
488
+ {
489
+ "epoch": 2.2044728434504792,
490
+ "grad_norm": 1.1458317041397095,
491
+ "learning_rate": 2.0086873134540626e-06,
492
+ "loss": 0.1095,
493
+ "step": 690
494
+ },
495
+ {
496
+ "epoch": 2.236421725239617,
497
+ "grad_norm": 1.8264726400375366,
498
+ "learning_rate": 1.8618326236955908e-06,
499
+ "loss": 0.155,
500
+ "step": 700
501
+ },
502
+ {
503
+ "epoch": 2.268370607028754,
504
+ "grad_norm": 1.4171464443206787,
505
+ "learning_rate": 1.7193151634562071e-06,
506
+ "loss": 0.1258,
507
+ "step": 710
508
+ },
509
+ {
510
+ "epoch": 2.3003194888178915,
511
+ "grad_norm": 1.4613436460494995,
512
+ "learning_rate": 1.581331904682089e-06,
513
+ "loss": 0.1518,
514
+ "step": 720
515
+ },
516
+ {
517
+ "epoch": 2.3322683706070286,
518
+ "grad_norm": 1.093307614326477,
519
+ "learning_rate": 1.4480735526452427e-06,
520
+ "loss": 0.1299,
521
+ "step": 730
522
+ },
523
+ {
524
+ "epoch": 2.364217252396166,
525
+ "grad_norm": 1.374348521232605,
526
+ "learning_rate": 1.319724282371664e-06,
527
+ "loss": 0.147,
528
+ "step": 740
529
+ },
530
+ {
531
+ "epoch": 2.3961661341853033,
532
+ "grad_norm": 1.2374401092529297,
533
+ "learning_rate": 1.1964614840949002e-06,
534
+ "loss": 0.1209,
535
+ "step": 750
536
+ },
537
+ {
538
+ "epoch": 2.428115015974441,
539
+ "grad_norm": 1.249546766281128,
540
+ "learning_rate": 1.078455518086784e-06,
541
+ "loss": 0.115,
542
+ "step": 760
543
+ },
544
+ {
545
+ "epoch": 2.460063897763578,
546
+ "grad_norm": 1.0231386423110962,
547
+ "learning_rate": 9.658694792042284e-07,
548
+ "loss": 0.1366,
549
+ "step": 770
550
+ },
551
+ {
552
+ "epoch": 2.4920127795527156,
553
+ "grad_norm": 1.1464241743087769,
554
+ "learning_rate": 8.58858971477457e-07,
555
+ "loss": 0.1184,
556
+ "step": 780
557
+ },
558
+ {
559
+ "epoch": 2.523961661341853,
560
+ "grad_norm": 0.9766585230827332,
561
+ "learning_rate": 7.575718930512516e-07,
562
+ "loss": 0.1227,
563
+ "step": 790
564
+ },
565
+ {
566
+ "epoch": 2.5559105431309903,
567
+ "grad_norm": 1.1977977752685547,
568
+ "learning_rate": 6.621482317764105e-07,
569
+ "loss": 0.1092,
570
+ "step": 800
571
+ },
572
+ {
573
+ "epoch": 2.587859424920128,
574
+ "grad_norm": 1.6657644510269165,
575
+ "learning_rate": 5.727198717339511e-07,
576
+ "loss": 0.1475,
577
+ "step": 810
578
+ },
579
+ {
580
+ "epoch": 2.619808306709265,
581
+ "grad_norm": 1.3672709465026855,
582
+ "learning_rate": 4.894104109594466e-07,
583
+ "loss": 0.1242,
584
+ "step": 820
585
+ },
586
+ {
587
+ "epoch": 2.6517571884984026,
588
+ "grad_norm": 1.7629551887512207,
589
+ "learning_rate": 4.123349906194357e-07,
590
+ "loss": 0.1389,
591
+ "step": 830
592
+ },
593
+ {
594
+ "epoch": 2.68370607028754,
595
+ "grad_norm": 0.8762993812561035,
596
+ "learning_rate": 3.416001358759635e-07,
597
+ "loss": 0.1091,
598
+ "step": 840
599
+ },
600
+ {
601
+ "epoch": 2.7156549520766773,
602
+ "grad_norm": 1.4116370677947998,
603
+ "learning_rate": 2.7730360865923954e-07,
604
+ "loss": 0.1272,
605
+ "step": 850
606
+ },
607
+ {
608
+ "epoch": 2.747603833865815,
609
+ "grad_norm": 1.2336490154266357,
610
+ "learning_rate": 2.1953427255185122e-07,
611
+ "loss": 0.1056,
612
+ "step": 860
613
+ },
614
+ {
615
+ "epoch": 2.779552715654952,
616
+ "grad_norm": 1.4972121715545654,
617
+ "learning_rate": 1.6837196997130434e-07,
618
+ "loss": 0.1287,
619
+ "step": 870
620
+ },
621
+ {
622
+ "epoch": 2.8115015974440896,
623
+ "grad_norm": 1.169806957244873,
624
+ "learning_rate": 1.2388741182062348e-07,
625
+ "loss": 0.1094,
626
+ "step": 880
627
+ },
628
+ {
629
+ "epoch": 2.8434504792332267,
630
+ "grad_norm": 1.0118532180786133,
631
+ "learning_rate": 8.614207975952083e-08,
632
+ "loss": 0.1464,
633
+ "step": 890
634
+ },
635
+ {
636
+ "epoch": 2.8753993610223643,
637
+ "grad_norm": 1.9891964197158813,
638
+ "learning_rate": 5.518814123121885e-08,
639
+ "loss": 0.1388,
640
+ "step": 900
641
+ },
642
+ {
643
+ "epoch": 2.9073482428115014,
644
+ "grad_norm": 1.1589871644973755,
645
+ "learning_rate": 3.10683773623488e-08,
646
+ "loss": 0.1236,
647
+ "step": 910
648
+ },
649
+ {
650
+ "epoch": 2.939297124600639,
651
+ "grad_norm": 1.1757524013519287,
652
+ "learning_rate": 1.3816123835588835e-08,
653
+ "loss": 0.1044,
654
+ "step": 920
655
+ },
656
+ {
657
+ "epoch": 2.9712460063897765,
658
+ "grad_norm": 1.4213329553604126,
659
+ "learning_rate": 3.4552248167507576e-09,
660
+ "loss": 0.1423,
661
+ "step": 930
662
+ },
663
+ {
664
+ "epoch": 3.0,
665
+ "step": 939,
666
+ "total_flos": 1.8033509624132403e+17,
667
+ "train_loss": 0.5275962893256999,
668
+ "train_runtime": 463.0949,
669
+ "train_samples_per_second": 64.782,
670
+ "train_steps_per_second": 2.028
671
+ }
672
+ ],
673
+ "logging_steps": 10,
674
+ "max_steps": 939,
675
+ "num_input_tokens_seen": 0,
676
+ "num_train_epochs": 3,
677
+ "save_steps": 2000,
678
+ "stateful_callbacks": {
679
+ "TrainerControl": {
680
+ "args": {
681
+ "should_epoch_stop": false,
682
+ "should_evaluate": false,
683
+ "should_log": false,
684
+ "should_save": true,
685
+ "should_training_stop": true
686
+ },
687
+ "attributes": {}
688
+ }
689
+ },
690
+ "total_flos": 1.8033509624132403e+17,
691
+ "train_batch_size": 4,
692
+ "trial_name": null,
693
+ "trial_params": null
694
+ }
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/training_loss.png ADDED
csqa__qwen3_8b__all_variants_r8_bs4_lr1e5_e3/lora/sft/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/README.md ADDED
@@ -0,0 +1,61 @@
1
+ ---
2
+ library_name: peft
3
+ license: other
4
+ base_model: Qwen/Qwen3-8B
5
+ tags:
6
+ - llama-factory
7
+ - lora
8
+ - generated_from_trainer
9
+ model-index:
10
+ - name: sft
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # sft
18
+
19
+ This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) on the csqa__qwen3_8b__n5000_t1t2_s0_bare_100_r8_bs4_lr1e5_e3_f_lora_sft dataset.
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 1e-05
39
+ - train_batch_size: 4
40
+ - eval_batch_size: 8
41
+ - seed: 42
42
+ - distributed_type: multi-GPU
43
+ - num_devices: 8
44
+ - total_train_batch_size: 32
45
+ - total_eval_batch_size: 64
46
+ - optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
47
+ - lr_scheduler_type: cosine
48
+ - lr_scheduler_warmup_ratio: 0.1
49
+ - num_epochs: 3
50
+
51
+ ### Training results
52
+
53
+
54
+
55
+ ### Framework versions
56
+
57
+ - PEFT 0.15.2
58
+ - Transformers 4.52.4
59
+ - Pytorch 2.7.0+cu126
60
+ - Datasets 3.6.0
61
+ - Tokenizers 0.21.1
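The model card above describes a LoRA adapter rather than a full model, so it has to be applied on top of the Qwen/Qwen3-8B base at load time. A minimal, hedged sketch of doing that with PEFT (the local adapter path is an assumption based on this commit's folder names; requires transformers, peft, and accelerate):

```python
# Illustrative sketch only; not part of the uploaded files.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen3-8B", torch_dtype="auto", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

# Hypothetical local path: point it at the adapter folder from this commit.
model = PeftModel.from_pretrained(
    base, "csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft"
)

messages = [{"role": "user", "content": "Where would you most likely find a seashell?"}]
prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```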
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_config.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "Qwen/Qwen3-8B",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.0,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "r": 8,
24
+ "rank_pattern": {},
25
+ "revision": null,
26
+ "target_modules": [
27
+ "up_proj",
28
+ "gate_proj",
29
+ "q_proj",
30
+ "v_proj",
31
+ "down_proj",
32
+ "k_proj",
33
+ "o_proj"
34
+ ],
35
+ "task_type": "CAUSAL_LM",
36
+ "trainable_token_indices": null,
37
+ "use_dora": false,
38
+ "use_rslora": false
39
+ }
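For readers more used to the Python side of PEFT, the adapter_config.json above corresponds roughly to the following LoraConfig. This is a sketch for illustration only; the JSON file is the authoritative source.

```python
from peft import LoraConfig

# Mirrors the adapter_config.json above: r=8, alpha=16, no dropout,
# all attention and MLP projection matrices targeted.
lora_cfg = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```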
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc556cc0eef508eb774759488ae917fb6b312a0dc043c82d664d3bc4af111d45
3
+ size 87360584
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/added_tokens.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "</think>": 151668,
3
+ "</tool_call>": 151658,
4
+ "</tool_response>": 151666,
5
+ "<think>": 151667,
6
+ "<tool_call>": 151657,
7
+ "<tool_response>": 151665,
8
+ "<|box_end|>": 151649,
9
+ "<|box_start|>": 151648,
10
+ "<|endoftext|>": 151643,
11
+ "<|file_sep|>": 151664,
12
+ "<|fim_middle|>": 151660,
13
+ "<|fim_pad|>": 151662,
14
+ "<|fim_prefix|>": 151659,
15
+ "<|fim_suffix|>": 151661,
16
+ "<|im_end|>": 151645,
17
+ "<|im_start|>": 151644,
18
+ "<|image_pad|>": 151655,
19
+ "<|object_ref_end|>": 151647,
20
+ "<|object_ref_start|>": 151646,
21
+ "<|quad_end|>": 151651,
22
+ "<|quad_start|>": 151650,
23
+ "<|repo_name|>": 151663,
24
+ "<|video_pad|>": 151656,
25
+ "<|vision_end|>": 151653,
26
+ "<|vision_pad|>": 151654,
27
+ "<|vision_start|>": 151652
28
+ }
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/all_results.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "epoch": 3.0,
3
+ "total_flos": 1.4737480102602342e+17,
4
+ "train_loss": 0.4462493950058731,
5
+ "train_runtime": 438.8238,
6
+ "train_samples_per_second": 68.365,
7
+ "train_steps_per_second": 2.14
8
+ }
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
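This is the stock Qwen3 chat template; its final branch shows how enable_thinking=False pre-fills an empty think block after the generation prompt. A small hedged example of invoking it through the tokenizer (illustrative only, not part of the uploaded files):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
text = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    add_generation_prompt=True,
    tokenize=False,
    enable_thinking=False,  # triggers the template's empty <think></think> branch
)
print(text)
```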
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/README.md ADDED
@@ -0,0 +1,202 @@
1
+ ---
2
+ base_model: Qwen/Qwen3-8B
3
+ library_name: peft
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+ ### Framework versions
201
+
202
+ - PEFT 0.15.2
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc556cc0eef508eb774759488ae917fb6b312a0dc043c82d664d3bc4af111d45
3
+ size 87360584
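Note that the three `version`/`oid`/`size` lines above are a Git LFS pointer rather than the safetensors payload itself: `oid` is the SHA-256 of the real file and `size` its length in bytes (roughly 87 MB here). A minimal sketch of materializing the actual file with `huggingface_hub` (the repo id is a placeholder; substitute the id of this dataset repo):

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id; replace with the actual "<namespace>/<repo>" of this dataset repo.
local_path = hf_hub_download(
    repo_id="<namespace>/<repo>",
    repo_type="dataset",
    filename="csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/adapter_model.safetensors",
)
print(local_path)  # local path to the downloaded adapter weights (~87 MB)
```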
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0].role == 'system' %}
4
+ {{- messages[0].content + '\n\n' }}
5
+ {%- endif %}
6
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
7
+ {%- for tool in tools %}
8
+ {{- "\n" }}
9
+ {{- tool | tojson }}
10
+ {%- endfor %}
11
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
12
+ {%- else %}
13
+ {%- if messages[0].role == 'system' %}
14
+ {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
15
+ {%- endif %}
16
+ {%- endif %}
17
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
18
+ {%- for message in messages[::-1] %}
19
+ {%- set index = (messages|length - 1) - loop.index0 %}
20
+ {%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
21
+ {%- set ns.multi_step_tool = false %}
22
+ {%- set ns.last_query_index = index %}
23
+ {%- endif %}
24
+ {%- endfor %}
25
+ {%- for message in messages %}
26
+ {%- if message.content is string %}
27
+ {%- set content = message.content %}
28
+ {%- else %}
29
+ {%- set content = '' %}
30
+ {%- endif %}
31
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
32
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
33
+ {%- elif message.role == "assistant" %}
34
+ {%- set reasoning_content = '' %}
35
+ {%- if message.reasoning_content is string %}
36
+ {%- set reasoning_content = message.reasoning_content %}
37
+ {%- else %}
38
+ {%- if '</think>' in content %}
39
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
40
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
41
+ {%- endif %}
42
+ {%- endif %}
43
+ {%- if loop.index0 > ns.last_query_index %}
44
+ {%- if loop.last or (not loop.last and reasoning_content) %}
45
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
46
+ {%- else %}
47
+ {{- '<|im_start|>' + message.role + '\n' + content }}
48
+ {%- endif %}
49
+ {%- else %}
50
+ {{- '<|im_start|>' + message.role + '\n' + content }}
51
+ {%- endif %}
52
+ {%- if message.tool_calls %}
53
+ {%- for tool_call in message.tool_calls %}
54
+ {%- if (loop.first and content) or (not loop.first) %}
55
+ {{- '\n' }}
56
+ {%- endif %}
57
+ {%- if tool_call.function %}
58
+ {%- set tool_call = tool_call.function %}
59
+ {%- endif %}
60
+ {{- '<tool_call>\n{"name": "' }}
61
+ {{- tool_call.name }}
62
+ {{- '", "arguments": ' }}
63
+ {%- if tool_call.arguments is string %}
64
+ {{- tool_call.arguments }}
65
+ {%- else %}
66
+ {{- tool_call.arguments | tojson }}
67
+ {%- endif %}
68
+ {{- '}\n</tool_call>' }}
69
+ {%- endfor %}
70
+ {%- endif %}
71
+ {{- '<|im_end|>\n' }}
72
+ {%- elif message.role == "tool" %}
73
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
74
+ {{- '<|im_start|>user' }}
75
+ {%- endif %}
76
+ {{- '\n<tool_response>\n' }}
77
+ {{- content }}
78
+ {{- '\n</tool_response>' }}
79
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
80
+ {{- '<|im_end|>\n' }}
81
+ {%- endif %}
82
+ {%- endif %}
83
+ {%- endfor %}
84
+ {%- if add_generation_prompt %}
85
+ {{- '<|im_start|>assistant\n' }}
86
+ {%- if enable_thinking is defined and enable_thinking is false %}
87
+ {{- '<think>\n\n</think>\n\n' }}
88
+ {%- endif %}
89
+ {%- endif %}
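The template above is the stock Qwen3 chat template: it wraps turns in `<|im_start|>`/`<|im_end|>`, renders tool signatures and `<tool_call>` blocks when `tools` are passed, and honors an `enable_thinking` flag that injects an empty `<think></think>` block to suppress the reasoning phase. Below is a minimal sketch of rendering it through `apply_chat_template`; the local path is illustrative, and any tokenizer carrying this template behaves the same way.

```python
from transformers import AutoTokenizer

# Illustrative path: the directory in this repo that contains chat_template.jinja and tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft")

messages = [
    {"role": "system", "content": "Answer the multiple-choice question with a single letter."},
    {"role": "user", "content": "Where would you expect to find a bookstore? (A) mall (B) ocean"},
]

# Default rendering: the prompt ends with '<|im_start|>assistant\n', leaving thinking enabled.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Passing enable_thinking=False makes the template append an empty <think></think> block,
# which tells the model to skip its reasoning phase before answering.
prompt_no_think = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)

print(prompt_no_think)
```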
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/optimizer.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fa7927adc0d9f6d98c82900cc6eef5bee0ed1e6b231854e34dc1dc8c1d602ca
3
+ size 175012683
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0d51a89f706019e82a1bece244691bbef31c0dc9544212f828d427b8052b077
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9db2a9d890a3bdfb6477301adcccedc994df442a5cf1f4f617b4737086e632ef
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74f99367832c29a5aff6042c0dc14d87a3ffc16ce9cc6466678227421d213c43
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2984cd1dfb7f3fdddf5cc99bb1dab2759a14eef425d83154d76b310151f5d01c
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cb1eac572046e66de7e9c52bba4d61d758728542e4fdc6d8b194a6c29e10f96
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41da76e42d56dcd9ee4821dcdf66fd5f2e82e821081ec3e07cb7e1e952647992
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1ef332606e64d74e7d5d444ae550c6edf2fe161722c5d652fd47845e2aa1894
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4b125a8a8f1fcea8543fd480628a947fbdff2ec9452d6a7be7e77a426d76b0c
3
+ size 16389
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70583c9a51d736f1353b66ad07c780d71300daef1dfeacad27c4a2efb4fd9d68
3
+ size 1465
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/checkpoint-939/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8659b993a9025fced8eda027b3eb2c8012015107d38086de64143bfb5704ef31
3
+ size 6161
csqa__qwen3_8b__bare_100_r8_bs4_lr1e5_e3/lora/sft/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }