scthornton committed on
Commit
d64e837
·
verified ·
1 Parent(s): e656a3a

Upload folder using huggingface_hub

Browse files
Files changed (45) hide show
  1. .gitattributes +4 -0
  2. README.md +181 -0
  3. adapter_config.json +246 -0
  4. adapter_model.safetensors +3 -0
  5. chat_template.jinja +266 -0
  6. checkpoint-121/README.md +209 -0
  7. checkpoint-121/adapter_config.json +246 -0
  8. checkpoint-121/adapter_model.safetensors +3 -0
  9. checkpoint-121/chat_template.jinja +266 -0
  10. checkpoint-121/optimizer.pt +3 -0
  11. checkpoint-121/processor_config.json +75 -0
  12. checkpoint-121/rng_state.pth +3 -0
  13. checkpoint-121/scheduler.pt +3 -0
  14. checkpoint-121/tokenizer.json +3 -0
  15. checkpoint-121/tokenizer_config.json +95 -0
  16. checkpoint-121/trainer_state.json +165 -0
  17. checkpoint-121/training_args.bin +3 -0
  18. checkpoint-242/README.md +209 -0
  19. checkpoint-242/adapter_config.json +246 -0
  20. checkpoint-242/adapter_model.safetensors +3 -0
  21. checkpoint-242/chat_template.jinja +266 -0
  22. checkpoint-242/optimizer.pt +3 -0
  23. checkpoint-242/processor_config.json +75 -0
  24. checkpoint-242/rng_state.pth +3 -0
  25. checkpoint-242/scheduler.pt +3 -0
  26. checkpoint-242/tokenizer.json +3 -0
  27. checkpoint-242/tokenizer_config.json +95 -0
  28. checkpoint-242/trainer_state.json +296 -0
  29. checkpoint-242/training_args.bin +3 -0
  30. checkpoint-363/README.md +209 -0
  31. checkpoint-363/adapter_config.json +246 -0
  32. checkpoint-363/adapter_model.safetensors +3 -0
  33. checkpoint-363/chat_template.jinja +266 -0
  34. checkpoint-363/optimizer.pt +3 -0
  35. checkpoint-363/processor_config.json +75 -0
  36. checkpoint-363/rng_state.pth +3 -0
  37. checkpoint-363/scheduler.pt +3 -0
  38. checkpoint-363/tokenizer.json +3 -0
  39. checkpoint-363/tokenizer_config.json +95 -0
  40. checkpoint-363/trainer_state.json +427 -0
  41. checkpoint-363/training_args.bin +3 -0
  42. processor_config.json +75 -0
  43. tokenizer.json +3 -0
  44. tokenizer_config.json +95 -0
  45. training_args.bin +3 -0
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ checkpoint-121/tokenizer.json filter=lfs diff=lfs merge=lfs -text
37
+ checkpoint-242/tokenizer.json filter=lfs diff=lfs merge=lfs -text
38
+ checkpoint-363/tokenizer.json filter=lfs diff=lfs merge=lfs -text
39
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: peft
3
+ license: gemma
4
+ base_model: google/gemma-4-26b-a4b-it
5
+ tags:
6
+ - security
7
+ - secure-code
8
+ - cybersecurity
9
+ - qlora
10
+ - gemma4
11
+ - code-generation
12
+ - owasp
13
+ - ai-security
14
+ datasets:
15
+ - scthornton/securecode
16
+ - scthornton/securecode-web
17
+ pipeline_tag: text-generation
18
+ model-index:
19
+ - name: gemma4-26b-securecode
20
+ results: []
21
+ ---
22
+
23
+ # Gemma 4 26B-A4B SecureCode
24
+
25
+ **Security-specialized code generation model** fine-tuned on the [SecureCode](https://huggingface.co/datasets/scthornton/securecode) and [SecureCode Web](https://huggingface.co/datasets/scthornton/securecode-web) datasets.
26
+
27
+ Part of the [SecureCode model collection](https://huggingface.co/collections/scthornton/securecode) by [perfecXion.ai](https://perfecxion.ai).
28
+
29
+ ## Model Details
30
+
31
+ | Property | Value |
32
+ |----------|-------|
33
+ | **Base Model** | [google/gemma-4-26b-a4b-it](https://huggingface.co/google/gemma-4-26b-a4b-it) |
34
+ | **Architecture** | Gemma 4 Mixture-of-Experts (26B total, 4B active per token) |
35
+ | **Method** | QLoRA (4-bit NormalFloat quantization) |
36
+ | **Parameters Trained** | ~1-2% via LoRA adapters |
37
+ | **Tier** | Tier 3: Large Security Specialist |
38
+
39
+ ## Training Configuration
40
+
41
+ ### QLoRA Settings
42
+
43
+ | Parameter | Value |
44
+ |-----------|-------|
45
+ | Quantization | 4-bit NormalFloat (NF4) |
46
+ | Compute Dtype | bfloat16 |
47
+ | Double Quantization | Enabled |
48
+ | LoRA Rank | 16 |
49
+ | LoRA Alpha | 32 |
50
+ | LoRA Dropout | 0.05 |
51
+ | Target Modules | q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj |
52
+
53
+ ### Training Hyperparameters
54
+
55
+ | Parameter | Value |
56
+ |-----------|-------|
57
+ | Learning Rate | 2e-4 |
58
+ | LR Scheduler | Cosine with 100-step warmup |
59
+ | Epochs | 3 |
60
+ | Per-device Batch Size | 2 |
61
+ | Gradient Accumulation | 8x |
62
+ | Effective Batch Size | 16 |
63
+ | Max Sequence Length | 4,096 tokens |
64
+ | Optimizer | paged_adamw_8bit |
65
+ | Precision | bf16 |
66
+
67
+ ### Hardware
68
+
69
+ | Component | Specification |
70
+ |-----------|--------------|
71
+ | System | NVIDIA DGX Spark |
72
+ | GPU | NVIDIA GB10 |
73
+ | Memory | 128 GB Unified (CPU/GPU) |
74
+
75
+ ## Training Data
76
+
77
+ Combined and deduplicated from two datasets:
78
+
79
+ | Dataset | Examples | Focus |
80
+ |---------|----------|-------|
81
+ | [scthornton/securecode](https://huggingface.co/datasets/scthornton/securecode) | 2,185 | Web + AI/ML security (OWASP Top 10 2021 + LLM Top 10 2025) |
82
+ | [scthornton/securecode-web](https://huggingface.co/datasets/scthornton/securecode-web) | 1,378 | Web security with framework-specific patterns |
83
+
84
+ ### Coverage
85
+
86
+ **Vulnerability Standards:**
87
+ - OWASP Top 10 2021 (Web/Application Security)
88
+ - OWASP LLM Top 10 2025 (AI/ML Security)
89
+ - 92+ CWEs mapped
90
+
91
+ **Programming Languages:** Python, JavaScript, Java, Go, PHP, TypeScript, C#, Ruby, Rust, Kotlin, YAML, HCL
92
+
93
+ **Frameworks:** 49+ including LangChain, OpenAI, Anthropic, HuggingFace, Django, Express.js, Spring Boot, FastAPI, and more
94
+
95
+ **Training Format:** 4-turn conversational examples:
96
+ 1. Developer asks about implementing a feature
97
+ 2. Assistant provides vulnerable + secure implementations with attack demonstrations
98
+ 3. Developer asks about testing and edge cases
99
+ 4. Assistant delivers defense-in-depth operational guidance
100
+
101
+ Every example is grounded in real CVEs and published security incidents.
102
+
103
+ ## Usage
104
+
105
+ ```python
106
+ from peft import PeftModel
107
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
108
+ import torch
109
+
110
+ # Load with 4-bit quantization (matches training)
111
+ bnb_config = BitsAndBytesConfig(
112
+ load_in_4bit=True,
113
+ bnb_4bit_quant_type="nf4",
114
+ bnb_4bit_compute_dtype=torch.bfloat16,
115
+ )
116
+
117
+ base_model = AutoModelForCausalLM.from_pretrained(
118
+ "google/gemma-4-26b-a4b-it",
119
+ quantization_config=bnb_config,
120
+ device_map="auto",
121
+ )
122
+ tokenizer = AutoTokenizer.from_pretrained("scthornton/gemma4-26b-securecode")
123
+ model = PeftModel.from_pretrained(base_model, "scthornton/gemma4-26b-securecode")
124
+
125
+ messages = [
126
+ {"role": "user", "content": "How do I implement JWT authentication with refresh tokens in Python?"}
127
+ ]
128
+
129
+ inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
130
+ outputs = model.generate(inputs, max_new_tokens=2048, temperature=0.7)
131
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
132
+ ```
133
+
134
+ ## What Makes This Different
135
+
136
+ Standard code models generate functional but often **insecure** code. SecureCode-trained models:
137
+
138
+ - Generate **secure implementations by default** with proper input validation, parameterized queries, and cryptographic best practices
139
+ - Provide **vulnerable AND secure** code side-by-side so developers understand the risk
140
+ - Include **defense-in-depth guidance**: logging, monitoring, SIEM integration, and infrastructure hardening
141
+ - Cover **AI/ML-specific vulnerabilities**: prompt injection defenses, RAG security, model supply chain protection
142
+
143
+ ## SecureCode Model Collection
144
+
145
+ | Model | Parameters | Base |
146
+ |-------|-----------|------|
147
+ | [llama-3.2-3b-securecode](https://huggingface.co/scthornton/llama-3.2-3b-securecode) | 3B | Llama 3.2 3B |
148
+ | [codegemma-7b-securecode](https://huggingface.co/scthornton/codegemma-7b-securecode) | 7B | CodeGemma 7B IT |
149
+ | [deepseek-coder-6.7b-securecode](https://huggingface.co/scthornton/deepseek-coder-6.7b-securecode) | 6.7B | DeepSeek Coder |
150
+ | [qwen-coder-7b-securecode](https://huggingface.co/scthornton/qwen-coder-7b-securecode) | 7B | Qwen Coder 7B |
151
+ | [codellama-13b-securecode](https://huggingface.co/scthornton/codellama-13b-securecode) | 13B | Code Llama 13B |
152
+ | [qwen2.5-coder-14b-securecode](https://huggingface.co/scthornton/qwen2.5-coder-14b-securecode) | 14B | Qwen 2.5 Coder 14B |
153
+ | [starcoder2-15b-securecode](https://huggingface.co/scthornton/starcoder2-15b-securecode) | 15B | StarCoder2 15B |
154
+ | [granite-20b-code-securecode](https://huggingface.co/scthornton/granite-20b-code-securecode) | 20B | Granite 20B Code |
155
+ | **gemma4-26b-securecode** | **26B (4B active)** | **Gemma 4 26B-A4B IT** |
156
+
157
+ ## Limitations
158
+
159
+ - Training data focuses on defensive security patterns; not designed for offensive security tooling
160
+ - 4-turn conversation format may not generalize to all coding interaction patterns
161
+ - MoE architecture means only 4B parameters are active per token despite 26B total
162
+ - Security guidance reflects best practices as of early 2026; new vulnerabilities may not be covered
163
+
164
+ ## License
165
+
166
+ - **Model:** Gemma license (inherited from base model)
167
+ - **Dataset:** CC BY-NC-SA 4.0
168
+ - **Adapters:** CC BY-NC-SA 4.0
169
+
170
+ ## Citation
171
+
172
+ ```bibtex
173
+ @misc{thornton2026securecode,
174
+ title={SecureCode: A Production-Grade Multi-Turn Dataset for Training Security-Aware Code Generation Models},
175
+ author={Thornton, Scott},
176
+ year={2026},
177
+ publisher={perfecXion.ai},
178
+ url={https://huggingface.co/datasets/scthornton/securecode},
179
+ note={arXiv:2512.18542}
180
+ }
181
+ ```
adapter_config.json ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "google/gemma-4-26b-a4b-it",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "lora_ga_config": null,
23
+ "megatron_config": null,
24
+ "megatron_core": "megatron.core",
25
+ "modules_to_save": null,
26
+ "peft_type": "LORA",
27
+ "peft_version": "0.18.2.dev0@e7355a3b2233820f6f30e558ce133ed22673a087",
28
+ "qalora_group_size": 16,
29
+ "r": 16,
30
+ "rank_pattern": {},
31
+ "revision": null,
32
+ "target_modules": [
33
+ "model.language_model.layers.4.self_attn.k_proj",
34
+ "model.language_model.layers.17.self_attn.o_proj",
35
+ "model.language_model.layers.3.mlp.up_proj",
36
+ "model.language_model.layers.17.mlp.up_proj",
37
+ "model.language_model.layers.8.mlp.down_proj",
38
+ "model.language_model.layers.27.self_attn.k_proj",
39
+ "model.language_model.layers.28.mlp.down_proj",
40
+ "model.language_model.layers.6.mlp.up_proj",
41
+ "model.language_model.layers.24.self_attn.k_proj",
42
+ "model.language_model.layers.6.self_attn.q_proj",
43
+ "model.language_model.layers.17.self_attn.q_proj",
44
+ "model.language_model.layers.15.self_attn.k_proj",
45
+ "model.language_model.layers.24.mlp.up_proj",
46
+ "model.language_model.layers.19.mlp.gate_proj",
47
+ "model.language_model.layers.16.self_attn.k_proj",
48
+ "model.language_model.layers.26.self_attn.q_proj",
49
+ "model.language_model.layers.21.mlp.up_proj",
50
+ "model.language_model.layers.17.mlp.down_proj",
51
+ "model.language_model.layers.10.self_attn.v_proj",
52
+ "model.language_model.layers.25.mlp.down_proj",
53
+ "model.language_model.layers.11.mlp.up_proj",
54
+ "model.language_model.layers.2.self_attn.o_proj",
55
+ "model.language_model.layers.15.mlp.down_proj",
56
+ "model.language_model.layers.10.self_attn.k_proj",
57
+ "model.language_model.layers.15.self_attn.q_proj",
58
+ "model.language_model.layers.9.self_attn.v_proj",
59
+ "model.language_model.layers.27.self_attn.o_proj",
60
+ "model.language_model.layers.3.self_attn.v_proj",
61
+ "model.language_model.layers.10.self_attn.q_proj",
62
+ "model.language_model.layers.21.mlp.gate_proj",
63
+ "model.language_model.layers.25.self_attn.q_proj",
64
+ "model.language_model.layers.5.self_attn.o_proj",
65
+ "model.language_model.layers.2.mlp.gate_proj",
66
+ "model.language_model.layers.9.mlp.gate_proj",
67
+ "model.language_model.layers.19.self_attn.v_proj",
68
+ "model.language_model.layers.18.self_attn.k_proj",
69
+ "model.language_model.layers.19.mlp.down_proj",
70
+ "model.language_model.layers.23.self_attn.o_proj",
71
+ "model.language_model.layers.27.mlp.gate_proj",
72
+ "model.language_model.layers.0.mlp.up_proj",
73
+ "model.language_model.layers.20.mlp.gate_proj",
74
+ "model.language_model.layers.28.self_attn.o_proj",
75
+ "model.language_model.layers.4.self_attn.o_proj",
76
+ "model.language_model.layers.28.self_attn.v_proj",
77
+ "model.language_model.layers.11.self_attn.q_proj",
78
+ "model.language_model.layers.26.self_attn.o_proj",
79
+ "model.language_model.layers.9.mlp.down_proj",
80
+ "model.language_model.layers.27.self_attn.v_proj",
81
+ "model.language_model.layers.23.mlp.up_proj",
82
+ "model.language_model.layers.2.mlp.up_proj",
83
+ "model.language_model.layers.0.mlp.gate_proj",
84
+ "model.language_model.layers.18.self_attn.o_proj",
85
+ "model.language_model.layers.19.self_attn.k_proj",
86
+ "model.language_model.layers.10.mlp.down_proj",
87
+ "model.language_model.layers.10.mlp.gate_proj",
88
+ "model.language_model.layers.0.self_attn.o_proj",
89
+ "model.language_model.layers.20.mlp.down_proj",
90
+ "model.language_model.layers.10.self_attn.o_proj",
91
+ "model.language_model.layers.15.self_attn.o_proj",
92
+ "model.language_model.layers.18.mlp.down_proj",
93
+ "model.language_model.layers.1.self_attn.v_proj",
94
+ "model.language_model.layers.13.self_attn.q_proj",
95
+ "model.language_model.layers.18.self_attn.q_proj",
96
+ "model.language_model.layers.3.mlp.down_proj",
97
+ "model.language_model.layers.20.self_attn.k_proj",
98
+ "model.language_model.layers.14.self_attn.o_proj",
99
+ "model.language_model.layers.7.mlp.down_proj",
100
+ "model.language_model.layers.25.self_attn.v_proj",
101
+ "model.language_model.layers.29.mlp.gate_proj",
102
+ "model.language_model.layers.2.self_attn.k_proj",
103
+ "model.language_model.layers.5.self_attn.k_proj",
104
+ "model.language_model.layers.9.self_attn.k_proj",
105
+ "model.language_model.layers.1.mlp.gate_proj",
106
+ "model.language_model.layers.8.self_attn.o_proj",
107
+ "model.language_model.layers.22.self_attn.k_proj",
108
+ "model.language_model.layers.3.self_attn.q_proj",
109
+ "model.language_model.layers.23.self_attn.k_proj",
110
+ "model.language_model.layers.3.self_attn.k_proj",
111
+ "model.language_model.layers.19.self_attn.q_proj",
112
+ "model.language_model.layers.18.self_attn.v_proj",
113
+ "model.language_model.layers.10.mlp.up_proj",
114
+ "model.language_model.layers.11.mlp.gate_proj",
115
+ "model.language_model.layers.1.mlp.up_proj",
116
+ "model.language_model.layers.18.mlp.gate_proj",
117
+ "model.language_model.layers.8.mlp.gate_proj",
118
+ "model.language_model.layers.7.mlp.gate_proj",
119
+ "model.language_model.layers.8.mlp.up_proj",
120
+ "model.language_model.layers.5.self_attn.q_proj",
121
+ "model.language_model.layers.14.self_attn.k_proj",
122
+ "model.language_model.layers.22.self_attn.q_proj",
123
+ "model.language_model.layers.4.mlp.down_proj",
124
+ "model.language_model.layers.22.mlp.gate_proj",
125
+ "model.language_model.layers.15.self_attn.v_proj",
126
+ "model.language_model.layers.21.self_attn.o_proj",
127
+ "model.language_model.layers.11.self_attn.o_proj",
128
+ "model.language_model.layers.20.mlp.up_proj",
129
+ "model.language_model.layers.16.self_attn.q_proj",
130
+ "model.language_model.layers.1.self_attn.k_proj",
131
+ "model.language_model.layers.24.mlp.gate_proj",
132
+ "model.language_model.layers.26.mlp.gate_proj",
133
+ "model.language_model.layers.2.self_attn.q_proj",
134
+ "model.language_model.layers.4.mlp.gate_proj",
135
+ "model.language_model.layers.7.self_attn.q_proj",
136
+ "model.language_model.layers.14.self_attn.v_proj",
137
+ "model.language_model.layers.27.self_attn.q_proj",
138
+ "model.language_model.layers.29.mlp.up_proj",
139
+ "model.language_model.layers.28.self_attn.k_proj",
140
+ "model.language_model.layers.24.self_attn.o_proj",
141
+ "model.language_model.layers.26.self_attn.k_proj",
142
+ "model.language_model.layers.21.mlp.down_proj",
143
+ "model.language_model.layers.14.mlp.gate_proj",
144
+ "model.language_model.layers.25.mlp.up_proj",
145
+ "model.language_model.layers.27.mlp.down_proj",
146
+ "model.language_model.layers.20.self_attn.v_proj",
147
+ "model.language_model.layers.0.mlp.down_proj",
148
+ "model.language_model.layers.6.self_attn.v_proj",
149
+ "model.language_model.layers.4.self_attn.q_proj",
150
+ "model.language_model.layers.9.self_attn.q_proj",
151
+ "model.language_model.layers.0.self_attn.q_proj",
152
+ "model.language_model.layers.27.mlp.up_proj",
153
+ "model.language_model.layers.29.self_attn.k_proj",
154
+ "model.language_model.layers.29.self_attn.q_proj",
155
+ "model.language_model.layers.12.mlp.up_proj",
156
+ "model.language_model.layers.6.mlp.down_proj",
157
+ "model.language_model.layers.2.mlp.down_proj",
158
+ "model.language_model.layers.6.mlp.gate_proj",
159
+ "model.language_model.layers.24.self_attn.v_proj",
160
+ "model.language_model.layers.4.mlp.up_proj",
161
+ "model.language_model.layers.9.self_attn.o_proj",
162
+ "model.language_model.layers.22.self_attn.v_proj",
163
+ "model.language_model.layers.23.mlp.gate_proj",
164
+ "model.language_model.layers.5.mlp.down_proj",
165
+ "model.language_model.layers.13.self_attn.o_proj",
166
+ "model.language_model.layers.14.mlp.up_proj",
167
+ "model.language_model.layers.15.mlp.gate_proj",
168
+ "model.language_model.layers.19.self_attn.o_proj",
169
+ "model.language_model.layers.24.mlp.down_proj",
170
+ "model.language_model.layers.21.self_attn.q_proj",
171
+ "model.language_model.layers.15.mlp.up_proj",
172
+ "model.language_model.layers.26.mlp.up_proj",
173
+ "model.language_model.layers.26.mlp.down_proj",
174
+ "model.language_model.layers.25.self_attn.o_proj",
175
+ "model.language_model.layers.8.self_attn.v_proj",
176
+ "model.language_model.layers.12.self_attn.o_proj",
177
+ "model.language_model.layers.6.self_attn.k_proj",
178
+ "model.language_model.layers.17.mlp.gate_proj",
179
+ "model.language_model.layers.12.self_attn.k_proj",
180
+ "model.language_model.layers.13.mlp.down_proj",
181
+ "model.language_model.layers.1.mlp.down_proj",
182
+ "model.language_model.layers.3.mlp.gate_proj",
183
+ "model.language_model.layers.14.mlp.down_proj",
184
+ "model.language_model.layers.9.mlp.up_proj",
185
+ "model.language_model.layers.21.self_attn.k_proj",
186
+ "model.language_model.layers.6.self_attn.o_proj",
187
+ "model.language_model.layers.0.self_attn.v_proj",
188
+ "model.language_model.layers.16.mlp.down_proj",
189
+ "model.language_model.layers.8.self_attn.k_proj",
190
+ "model.language_model.layers.12.mlp.gate_proj",
191
+ "model.language_model.layers.7.self_attn.o_proj",
192
+ "model.language_model.layers.18.mlp.up_proj",
193
+ "model.language_model.layers.13.mlp.up_proj",
194
+ "model.language_model.layers.16.mlp.up_proj",
195
+ "model.language_model.layers.17.self_attn.k_proj",
196
+ "model.language_model.layers.25.self_attn.k_proj",
197
+ "model.language_model.layers.8.self_attn.q_proj",
198
+ "model.language_model.layers.4.self_attn.v_proj",
199
+ "model.language_model.layers.23.self_attn.q_proj",
200
+ "model.language_model.layers.1.self_attn.o_proj",
201
+ "model.language_model.layers.5.mlp.up_proj",
202
+ "model.language_model.layers.13.self_attn.k_proj",
203
+ "model.language_model.layers.7.self_attn.k_proj",
204
+ "model.language_model.layers.22.self_attn.o_proj",
205
+ "model.language_model.layers.22.mlp.up_proj",
206
+ "model.language_model.layers.16.self_attn.o_proj",
207
+ "model.language_model.layers.24.self_attn.q_proj",
208
+ "model.language_model.layers.12.self_attn.q_proj",
209
+ "model.language_model.layers.2.self_attn.v_proj",
210
+ "model.language_model.layers.12.self_attn.v_proj",
211
+ "model.language_model.layers.13.mlp.gate_proj",
212
+ "model.language_model.layers.12.mlp.down_proj",
213
+ "model.language_model.layers.14.self_attn.q_proj",
214
+ "model.language_model.layers.26.self_attn.v_proj",
215
+ "model.language_model.layers.28.mlp.up_proj",
216
+ "model.language_model.layers.19.mlp.up_proj",
217
+ "model.language_model.layers.16.mlp.gate_proj",
218
+ "model.language_model.layers.7.self_attn.v_proj",
219
+ "model.language_model.layers.25.mlp.gate_proj",
220
+ "model.language_model.layers.13.self_attn.v_proj",
221
+ "model.language_model.layers.20.self_attn.q_proj",
222
+ "model.language_model.layers.5.mlp.gate_proj",
223
+ "model.language_model.layers.1.self_attn.q_proj",
224
+ "model.language_model.layers.11.mlp.down_proj",
225
+ "model.language_model.layers.0.self_attn.k_proj",
226
+ "model.language_model.layers.21.self_attn.v_proj",
227
+ "model.language_model.layers.28.self_attn.q_proj",
228
+ "model.language_model.layers.29.self_attn.o_proj",
229
+ "model.language_model.layers.11.self_attn.k_proj",
230
+ "model.language_model.layers.29.mlp.down_proj",
231
+ "model.language_model.layers.7.mlp.up_proj",
232
+ "model.language_model.layers.22.mlp.down_proj",
233
+ "model.language_model.layers.20.self_attn.o_proj",
234
+ "model.language_model.layers.3.self_attn.o_proj",
235
+ "model.language_model.layers.23.mlp.down_proj",
236
+ "model.language_model.layers.16.self_attn.v_proj",
237
+ "model.language_model.layers.28.mlp.gate_proj"
238
+ ],
239
+ "target_parameters": null,
240
+ "task_type": "CAUSAL_LM",
241
+ "trainable_token_indices": null,
242
+ "use_bdlora": null,
243
+ "use_dora": false,
244
+ "use_qalora": false,
245
+ "use_rslora": false
246
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd22f3389135f43d99a3e2a496b4df257c053e7777182b566f577e496b885a88
3
+ size 74403016
chat_template.jinja ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- set add_comma = false -%}
6
+ {%- if key not in standard_keys -%}
7
+ {%- if ns.found_first %},{% endif -%}
8
+ {%- set ns.found_first = true -%}
9
+ {{ key }}:{
10
+ {%- if value['description'] -%}
11
+ description:<|"|>{{ value['description'] }}<|"|>
12
+ {%- set add_comma = true -%}
13
+ {%- endif -%}
14
+ {%- if value['nullable'] %}
15
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
16
+ nullable:true
17
+ {%- endif -%}
18
+ {%- if value['type'] | upper == 'STRING' -%}
19
+ {%- if value['enum'] -%}
20
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
21
+ enum:{{ format_argument(value['enum']) }}
22
+ {%- endif -%}
23
+ {%- elif value['type'] | upper == 'OBJECT' -%}
24
+ ,properties:{
25
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
26
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
27
+ {%- elif value is mapping -%}
28
+ {{- format_parameters(value, value['required'] | default([])) -}}
29
+ {%- endif -%}
30
+ }
31
+ {%- if value['required'] -%}
32
+ ,required:[
33
+ {%- for item in value['required'] | default([]) -%}
34
+ <|"|>{{- item -}}<|"|>
35
+ {%- if not loop.last %},{% endif -%}
36
+ {%- endfor -%}
37
+ ]
38
+ {%- endif -%}
39
+ {%- elif value['type'] | upper == 'ARRAY' -%}
40
+ {%- if value['items'] is mapping and value['items'] -%}
41
+ ,items:{
42
+ {%- set ns_items = namespace(found_first=false) -%}
43
+ {%- for item_key, item_value in value['items'] | dictsort -%}
44
+ {%- if item_value is not none -%}
45
+ {%- if ns_items.found_first %},{% endif -%}
46
+ {%- set ns_items.found_first = true -%}
47
+ {%- if item_key == 'properties' -%}
48
+ properties:{
49
+ {%- if item_value is mapping -%}
50
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
51
+ {%- endif -%}
52
+ }
53
+ {%- elif item_key == 'required' -%}
54
+ required:[
55
+ {%- for req_item in item_value -%}
56
+ <|"|>{{- req_item -}}<|"|>
57
+ {%- if not loop.last %},{% endif -%}
58
+ {%- endfor -%}
59
+ ]
60
+ {%- elif item_key == 'type' -%}
61
+ {%- if item_value is string -%}
62
+ type:{{ format_argument(item_value | upper) }}
63
+ {%- else -%}
64
+ type:{{ format_argument(item_value | map('upper') | list) }}
65
+ {%- endif -%}
66
+ {%- else -%}
67
+ {{ item_key }}:{{ format_argument(item_value) }}
68
+ {%- endif -%}
69
+ {%- endif -%}
70
+ {%- endfor -%}
71
+ }
72
+ {%- endif -%}
73
+ {%- endif -%}
74
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
75
+ type:<|"|>{{ value['type'] | upper }}<|"|>}
76
+ {%- endif -%}
77
+ {%- endfor -%}
78
+ {%- endmacro -%}
79
+ {%- macro format_function_declaration(tool_data) -%}
80
+ declaration:{{- tool_data['function']['name'] -}}{description:<|"|>{{- tool_data['function']['description'] -}}<|"|>
81
+ {%- set params = tool_data['function']['parameters'] -%}
82
+ {%- if params -%}
83
+ ,parameters:{
84
+ {%- if params['properties'] -%}
85
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
86
+ {%- endif -%}
87
+ {%- if params['required'] -%}
88
+ required:[
89
+ {%- for item in params['required'] -%}
90
+ <|"|>{{- item -}}<|"|>
91
+ {{- ',' if not loop.last -}}
92
+ {%- endfor -%}
93
+ ],
94
+ {%- endif -%}
95
+ {%- if params['type'] -%}
96
+ type:<|"|>{{- params['type'] | upper -}}<|"|>}
97
+ {%- endif -%}
98
+ {%- endif -%}
99
+ {%- if 'response' in tool_data['function'] -%}
100
+ {%- set response_declaration = tool_data['function']['response'] -%}
101
+ ,response:{
102
+ {%- if response_declaration['description'] -%}
103
+ description:<|"|>{{- response_declaration['description'] -}}<|"|>,
104
+ {%- endif -%}
105
+ {%- if response_declaration['type'] | upper == 'OBJECT' -%}
106
+ type:<|"|>{{- response_declaration['type'] | upper -}}<|"|>}
107
+ {%- endif -%}
108
+ {%- endif -%}
109
+ }
110
+ {%- endmacro -%}
111
+ {%- macro format_argument(argument, escape_keys=True) -%}
112
+ {%- if argument is string -%}
113
+ {{- '<|"|>' + argument + '<|"|>' -}}
114
+ {%- elif argument is boolean -%}
115
+ {{- 'true' if argument else 'false' -}}
116
+ {%- elif argument is mapping -%}
117
+ {{- '{' -}}
118
+ {%- set ns = namespace(found_first=false) -%}
119
+ {%- for key, value in argument | dictsort -%}
120
+ {%- if ns.found_first %},{% endif -%}
121
+ {%- set ns.found_first = true -%}
122
+ {%- if escape_keys -%}
123
+ {{- '<|"|>' + key + '<|"|>' -}}
124
+ {%- else -%}
125
+ {{- key -}}
126
+ {%- endif -%}
127
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
128
+ {%- endfor -%}
129
+ {{- '}' -}}
130
+ {%- elif argument is sequence -%}
131
+ {{- '[' -}}
132
+ {%- for item in argument -%}
133
+ {{- format_argument(item, escape_keys=escape_keys) -}}
134
+ {%- if not loop.last %},{% endif -%}
135
+ {%- endfor -%}
136
+ {{- ']' -}}
137
+ {%- else -%}
138
+ {{- argument -}}
139
+ {%- endif -%}
140
+ {%- endmacro -%}
141
+ {%- macro strip_thinking(text) -%}
142
+ {%- set ns = namespace(result='') -%}
143
+ {%- for part in text.split('<channel|>') -%}
144
+ {%- if '<|channel>' in part -%}
145
+ {%- set ns.result = ns.result + part.split('<|channel>')[0] -%}
146
+ {%- else -%}
147
+ {%- set ns.result = ns.result + part -%}
148
+ {%- endif -%}
149
+ {%- endfor -%}
150
+ {{- ns.result | trim -}}
151
+ {%- endmacro -%}
152
+
153
+ {%- set ns = namespace(prev_message_type=None) -%}
154
+ {%- set loop_messages = messages -%}
155
+ {{ bos_token }}
156
+ {#- Handle System/Tool Definitions Block -#}
157
+ {%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}
158
+ {{- '<|turn>system\n' -}}
159
+
160
+ {#- Inject Thinking token at the very top of the FIRST system turn -#}
161
+ {%- if enable_thinking is defined and enable_thinking -%}
162
+ {{- '<|think|>' -}}
163
+ {%- set ns.prev_message_type = 'think' -%}
164
+ {%- endif -%}
165
+
166
+ {%- if messages[0]['role'] in ['system', 'developer'] -%}
167
+ {{- messages[0]['content'] | trim -}}
168
+ {%- set loop_messages = messages[1:] -%}
169
+ {%- endif -%}
170
+
171
+ {%- if tools -%}
172
+ {%- for tool in tools %}
173
+ {{- '<|tool>' -}}
174
+ {{- format_function_declaration(tool) | trim -}}
175
+ {{- '<tool|>' -}}
176
+ {%- endfor %}
177
+ {%- set ns.prev_message_type = 'tool' -%}
178
+ {%- endif -%}
179
+
180
+ {{- '<turn|>\n' -}}
181
+ {%- endif %}
182
+
183
+ {#- Loop through messages -#}
184
+ {%- for message in loop_messages -%}
185
+ {%- set ns.prev_message_type = None -%}
186
+ {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}
187
+ {{- '<|turn>' + role + '\n' }}
188
+
189
+ {%- if message['tool_calls'] -%}
190
+ {%- for tool_call in message['tool_calls'] -%}
191
+ {%- set function = tool_call['function'] -%}
192
+ {{- '<|tool_call>call:' + function['name'] + '{' -}}
193
+ {%- if function['arguments'] is mapping -%}
194
+ {%- set ns_args = namespace(found_first=false) -%}
195
+ {%- for key, value in function['arguments'] | dictsort -%}
196
+ {%- if ns_args.found_first %},{% endif -%}
197
+ {%- set ns_args.found_first = true -%}
198
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
199
+ {%- endfor -%}
200
+ {%- elif function['arguments'] is string -%}
201
+ {{- function['arguments'] -}}
202
+ {%- endif -%}
203
+ {{- '}<tool_call|>' -}}
204
+ {%- endfor -%}
205
+ {%- set ns.prev_message_type = 'tool_call' -%}
206
+ {%- endif -%}
207
+
208
+ {%- if message['tool_responses'] -%}
209
+ {#- Tool Response handling -#}
210
+ {%- for tool_response in message['tool_responses'] -%}
211
+ {{- '<|tool_response>' -}}
212
+ {%- if tool_response['response'] is mapping -%}
213
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{' -}}
214
+ {%- for key, value in tool_response['response'] | dictsort -%}
215
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
216
+ {%- if not loop.last %},{% endif -%}
217
+ {%- endfor -%}
218
+ {{- '}' -}}
219
+ {%- else -%}
220
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{value:' + format_argument(tool_response['response'], escape_keys=False) + '}' -}}
221
+ {%- endif -%}
222
+ {{- '<tool_response|>' -}}
223
+ {%- endfor -%}
224
+ {%- set ns.prev_message_type = 'tool_response' -%}
225
+ {%- endif -%}
226
+
227
+ {%- if message['content'] is string -%}
228
+ {%- if role == 'model' -%}
229
+ {{- strip_thinking(message['content']) -}}
230
+ {%- else -%}
231
+ {{- message['content'] | trim -}}
232
+ {%- endif -%}
233
+ {%- elif message['content'] is sequence -%}
234
+ {%- for item in message['content'] -%}
235
+ {%- if item['type'] == 'text' -%}
236
+ {%- if role == 'model' -%}
237
+ {{- strip_thinking(item['text']) -}}
238
+ {%- else -%}
239
+ {{- item['text'] | trim -}}
240
+ {%- endif -%}
241
+ {%- elif item['type'] == 'image' -%}
242
+ {{- '\n\n<|image|>\n\n' -}}
243
+ {%- set ns.prev_message_type = 'image' -%}
244
+ {%- elif item['type'] == 'audio' -%}
245
+ {{- '<|audio|>' -}}
246
+ {%- set ns.prev_message_type = 'audio' -%}
247
+ {%- elif item['type'] == 'video' -%}
248
+ {{- '\n\n<|video|>\n\n' -}}
249
+ {%- set ns.prev_message_type = 'video' -%}
250
+ {%- endif -%}
251
+ {%- endfor -%}
252
+ {%- endif -%}
253
+
254
+ {%- if not (message['tool_responses'] and not message['content']) -%}
255
+ {{- '<turn|>\n' -}}
256
+ {%- endif -%}
257
+ {%- endfor -%}
258
+
259
+ {%- if add_generation_prompt -%}
260
+ {%- if ns.prev_message_type != 'tool_response' -%}
261
+ {{- '<|turn>model\n' -}}
262
+ {%- endif -%}
263
+ {%- if not enable_thinking | default(false) -%}
264
+ {{- '<|channel>thought\n<channel|>' -}}
265
+ {%- endif -%}
266
+ {%- endif -%}
checkpoint-121/README.md ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: google/gemma-4-26b-a4b-it
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:google/gemma-4-26b-a4b-it
7
+ - lora
8
+ - sft
9
+ - transformers
10
+ - trl
11
+ ---
12
+
13
+ # Model Card for Model ID
14
+
15
+ <!-- Provide a quick summary of what the model is/does. -->
16
+
17
+
18
+
19
+ ## Model Details
20
+
21
+ ### Model Description
22
+
23
+ <!-- Provide a longer summary of what this model is. -->
24
+
25
+
26
+
27
+ - **Developed by:** [More Information Needed]
28
+ - **Funded by [optional]:** [More Information Needed]
29
+ - **Shared by [optional]:** [More Information Needed]
30
+ - **Model type:** [More Information Needed]
31
+ - **Language(s) (NLP):** [More Information Needed]
32
+ - **License:** [More Information Needed]
33
+ - **Finetuned from model [optional]:** [More Information Needed]
34
+
35
+ ### Model Sources [optional]
36
+
37
+ <!-- Provide the basic links for the model. -->
38
+
39
+ - **Repository:** [More Information Needed]
40
+ - **Paper [optional]:** [More Information Needed]
41
+ - **Demo [optional]:** [More Information Needed]
42
+
43
+ ## Uses
44
+
45
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
46
+
47
+ ### Direct Use
48
+
49
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
50
+
51
+ [More Information Needed]
52
+
53
+ ### Downstream Use [optional]
54
+
55
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
56
+
57
+ [More Information Needed]
58
+
59
+ ### Out-of-Scope Use
60
+
61
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
62
+
63
+ [More Information Needed]
64
+
65
+ ## Bias, Risks, and Limitations
66
+
67
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
68
+
69
+ [More Information Needed]
70
+
71
+ ### Recommendations
72
+
73
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
74
+
75
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
76
+
77
+ ## How to Get Started with the Model
78
+
79
+ Use the code below to get started with the model.
80
+
81
+ [More Information Needed]
82
+
83
+ ## Training Details
84
+
85
+ ### Training Data
86
+
87
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
88
+
89
+ [More Information Needed]
90
+
91
+ ### Training Procedure
92
+
93
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
94
+
95
+ #### Preprocessing [optional]
96
+
97
+ [More Information Needed]
98
+
99
+
100
+ #### Training Hyperparameters
101
+
102
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
103
+
104
+ #### Speeds, Sizes, Times [optional]
105
+
106
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
107
+
108
+ [More Information Needed]
109
+
110
+ ## Evaluation
111
+
112
+ <!-- This section describes the evaluation protocols and provides the results. -->
113
+
114
+ ### Testing Data, Factors & Metrics
115
+
116
+ #### Testing Data
117
+
118
+ <!-- This should link to a Dataset Card if possible. -->
119
+
120
+ [More Information Needed]
121
+
122
+ #### Factors
123
+
124
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
125
+
126
+ [More Information Needed]
127
+
128
+ #### Metrics
129
+
130
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
131
+
132
+ [More Information Needed]
133
+
134
+ ### Results
135
+
136
+ [More Information Needed]
137
+
138
+ #### Summary
139
+
140
+
141
+
142
+ ## Model Examination [optional]
143
+
144
+ <!-- Relevant interpretability work for the model goes here -->
145
+
146
+ [More Information Needed]
147
+
148
+ ## Environmental Impact
149
+
150
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
151
+
152
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
153
+
154
+ - **Hardware Type:** [More Information Needed]
155
+ - **Hours used:** [More Information Needed]
156
+ - **Cloud Provider:** [More Information Needed]
157
+ - **Compute Region:** [More Information Needed]
158
+ - **Carbon Emitted:** [More Information Needed]
159
+
160
+ ## Technical Specifications [optional]
161
+
162
+ ### Model Architecture and Objective
163
+
164
+ [More Information Needed]
165
+
166
+ ### Compute Infrastructure
167
+
168
+ [More Information Needed]
169
+
170
+ #### Hardware
171
+
172
+ [More Information Needed]
173
+
174
+ #### Software
175
+
176
+ [More Information Needed]
177
+
178
+ ## Citation [optional]
179
+
180
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
181
+
182
+ **BibTeX:**
183
+
184
+ [More Information Needed]
185
+
186
+ **APA:**
187
+
188
+ [More Information Needed]
189
+
190
+ ## Glossary [optional]
191
+
192
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
193
+
194
+ [More Information Needed]
195
+
196
+ ## More Information [optional]
197
+
198
+ [More Information Needed]
199
+
200
+ ## Model Card Authors [optional]
201
+
202
+ [More Information Needed]
203
+
204
+ ## Model Card Contact
205
+
206
+ [More Information Needed]
207
+ ### Framework versions
208
+
209
+ - PEFT 0.18.2.dev0
checkpoint-121/adapter_config.json ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "google/gemma-4-26b-a4b-it",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "lora_ga_config": null,
23
+ "megatron_config": null,
24
+ "megatron_core": "megatron.core",
25
+ "modules_to_save": null,
26
+ "peft_type": "LORA",
27
+ "peft_version": "0.18.2.dev0@e7355a3b2233820f6f30e558ce133ed22673a087",
28
+ "qalora_group_size": 16,
29
+ "r": 16,
30
+ "rank_pattern": {},
31
+ "revision": null,
32
+ "target_modules": [
33
+ "model.language_model.layers.4.self_attn.k_proj",
34
+ "model.language_model.layers.17.self_attn.o_proj",
35
+ "model.language_model.layers.3.mlp.up_proj",
36
+ "model.language_model.layers.17.mlp.up_proj",
37
+ "model.language_model.layers.8.mlp.down_proj",
38
+ "model.language_model.layers.27.self_attn.k_proj",
39
+ "model.language_model.layers.28.mlp.down_proj",
40
+ "model.language_model.layers.6.mlp.up_proj",
41
+ "model.language_model.layers.24.self_attn.k_proj",
42
+ "model.language_model.layers.6.self_attn.q_proj",
43
+ "model.language_model.layers.17.self_attn.q_proj",
44
+ "model.language_model.layers.15.self_attn.k_proj",
45
+ "model.language_model.layers.24.mlp.up_proj",
46
+ "model.language_model.layers.19.mlp.gate_proj",
47
+ "model.language_model.layers.16.self_attn.k_proj",
48
+ "model.language_model.layers.26.self_attn.q_proj",
49
+ "model.language_model.layers.21.mlp.up_proj",
50
+ "model.language_model.layers.17.mlp.down_proj",
51
+ "model.language_model.layers.10.self_attn.v_proj",
52
+ "model.language_model.layers.25.mlp.down_proj",
53
+ "model.language_model.layers.11.mlp.up_proj",
54
+ "model.language_model.layers.2.self_attn.o_proj",
55
+ "model.language_model.layers.15.mlp.down_proj",
56
+ "model.language_model.layers.10.self_attn.k_proj",
57
+ "model.language_model.layers.15.self_attn.q_proj",
58
+ "model.language_model.layers.9.self_attn.v_proj",
59
+ "model.language_model.layers.27.self_attn.o_proj",
60
+ "model.language_model.layers.3.self_attn.v_proj",
61
+ "model.language_model.layers.10.self_attn.q_proj",
62
+ "model.language_model.layers.21.mlp.gate_proj",
63
+ "model.language_model.layers.25.self_attn.q_proj",
64
+ "model.language_model.layers.5.self_attn.o_proj",
65
+ "model.language_model.layers.2.mlp.gate_proj",
66
+ "model.language_model.layers.9.mlp.gate_proj",
67
+ "model.language_model.layers.19.self_attn.v_proj",
68
+ "model.language_model.layers.18.self_attn.k_proj",
69
+ "model.language_model.layers.19.mlp.down_proj",
70
+ "model.language_model.layers.23.self_attn.o_proj",
71
+ "model.language_model.layers.27.mlp.gate_proj",
72
+ "model.language_model.layers.0.mlp.up_proj",
73
+ "model.language_model.layers.20.mlp.gate_proj",
74
+ "model.language_model.layers.28.self_attn.o_proj",
75
+ "model.language_model.layers.4.self_attn.o_proj",
76
+ "model.language_model.layers.28.self_attn.v_proj",
77
+ "model.language_model.layers.11.self_attn.q_proj",
78
+ "model.language_model.layers.26.self_attn.o_proj",
79
+ "model.language_model.layers.9.mlp.down_proj",
80
+ "model.language_model.layers.27.self_attn.v_proj",
81
+ "model.language_model.layers.23.mlp.up_proj",
82
+ "model.language_model.layers.2.mlp.up_proj",
83
+ "model.language_model.layers.0.mlp.gate_proj",
84
+ "model.language_model.layers.18.self_attn.o_proj",
85
+ "model.language_model.layers.19.self_attn.k_proj",
86
+ "model.language_model.layers.10.mlp.down_proj",
87
+ "model.language_model.layers.10.mlp.gate_proj",
88
+ "model.language_model.layers.0.self_attn.o_proj",
89
+ "model.language_model.layers.20.mlp.down_proj",
90
+ "model.language_model.layers.10.self_attn.o_proj",
91
+ "model.language_model.layers.15.self_attn.o_proj",
92
+ "model.language_model.layers.18.mlp.down_proj",
93
+ "model.language_model.layers.1.self_attn.v_proj",
94
+ "model.language_model.layers.13.self_attn.q_proj",
95
+ "model.language_model.layers.18.self_attn.q_proj",
96
+ "model.language_model.layers.3.mlp.down_proj",
97
+ "model.language_model.layers.20.self_attn.k_proj",
98
+ "model.language_model.layers.14.self_attn.o_proj",
99
+ "model.language_model.layers.7.mlp.down_proj",
100
+ "model.language_model.layers.25.self_attn.v_proj",
101
+ "model.language_model.layers.29.mlp.gate_proj",
102
+ "model.language_model.layers.2.self_attn.k_proj",
103
+ "model.language_model.layers.5.self_attn.k_proj",
104
+ "model.language_model.layers.9.self_attn.k_proj",
105
+ "model.language_model.layers.1.mlp.gate_proj",
106
+ "model.language_model.layers.8.self_attn.o_proj",
107
+ "model.language_model.layers.22.self_attn.k_proj",
108
+ "model.language_model.layers.3.self_attn.q_proj",
109
+ "model.language_model.layers.23.self_attn.k_proj",
110
+ "model.language_model.layers.3.self_attn.k_proj",
111
+ "model.language_model.layers.19.self_attn.q_proj",
112
+ "model.language_model.layers.18.self_attn.v_proj",
113
+ "model.language_model.layers.10.mlp.up_proj",
114
+ "model.language_model.layers.11.mlp.gate_proj",
115
+ "model.language_model.layers.1.mlp.up_proj",
116
+ "model.language_model.layers.18.mlp.gate_proj",
117
+ "model.language_model.layers.8.mlp.gate_proj",
118
+ "model.language_model.layers.7.mlp.gate_proj",
119
+ "model.language_model.layers.8.mlp.up_proj",
120
+ "model.language_model.layers.5.self_attn.q_proj",
121
+ "model.language_model.layers.14.self_attn.k_proj",
122
+ "model.language_model.layers.22.self_attn.q_proj",
123
+ "model.language_model.layers.4.mlp.down_proj",
124
+ "model.language_model.layers.22.mlp.gate_proj",
125
+ "model.language_model.layers.15.self_attn.v_proj",
126
+ "model.language_model.layers.21.self_attn.o_proj",
127
+ "model.language_model.layers.11.self_attn.o_proj",
128
+ "model.language_model.layers.20.mlp.up_proj",
129
+ "model.language_model.layers.16.self_attn.q_proj",
130
+ "model.language_model.layers.1.self_attn.k_proj",
131
+ "model.language_model.layers.24.mlp.gate_proj",
132
+ "model.language_model.layers.26.mlp.gate_proj",
133
+ "model.language_model.layers.2.self_attn.q_proj",
134
+ "model.language_model.layers.4.mlp.gate_proj",
135
+ "model.language_model.layers.7.self_attn.q_proj",
136
+ "model.language_model.layers.14.self_attn.v_proj",
137
+ "model.language_model.layers.27.self_attn.q_proj",
138
+ "model.language_model.layers.29.mlp.up_proj",
139
+ "model.language_model.layers.28.self_attn.k_proj",
140
+ "model.language_model.layers.24.self_attn.o_proj",
141
+ "model.language_model.layers.26.self_attn.k_proj",
142
+ "model.language_model.layers.21.mlp.down_proj",
143
+ "model.language_model.layers.14.mlp.gate_proj",
144
+ "model.language_model.layers.25.mlp.up_proj",
145
+ "model.language_model.layers.27.mlp.down_proj",
146
+ "model.language_model.layers.20.self_attn.v_proj",
147
+ "model.language_model.layers.0.mlp.down_proj",
148
+ "model.language_model.layers.6.self_attn.v_proj",
149
+ "model.language_model.layers.4.self_attn.q_proj",
150
+ "model.language_model.layers.9.self_attn.q_proj",
151
+ "model.language_model.layers.0.self_attn.q_proj",
152
+ "model.language_model.layers.27.mlp.up_proj",
153
+ "model.language_model.layers.29.self_attn.k_proj",
154
+ "model.language_model.layers.29.self_attn.q_proj",
155
+ "model.language_model.layers.12.mlp.up_proj",
156
+ "model.language_model.layers.6.mlp.down_proj",
157
+ "model.language_model.layers.2.mlp.down_proj",
158
+ "model.language_model.layers.6.mlp.gate_proj",
159
+ "model.language_model.layers.24.self_attn.v_proj",
160
+ "model.language_model.layers.4.mlp.up_proj",
161
+ "model.language_model.layers.9.self_attn.o_proj",
162
+ "model.language_model.layers.22.self_attn.v_proj",
163
+ "model.language_model.layers.23.mlp.gate_proj",
164
+ "model.language_model.layers.5.mlp.down_proj",
165
+ "model.language_model.layers.13.self_attn.o_proj",
166
+ "model.language_model.layers.14.mlp.up_proj",
167
+ "model.language_model.layers.15.mlp.gate_proj",
168
+ "model.language_model.layers.19.self_attn.o_proj",
169
+ "model.language_model.layers.24.mlp.down_proj",
170
+ "model.language_model.layers.21.self_attn.q_proj",
171
+ "model.language_model.layers.15.mlp.up_proj",
172
+ "model.language_model.layers.26.mlp.up_proj",
173
+ "model.language_model.layers.26.mlp.down_proj",
174
+ "model.language_model.layers.25.self_attn.o_proj",
175
+ "model.language_model.layers.8.self_attn.v_proj",
176
+ "model.language_model.layers.12.self_attn.o_proj",
177
+ "model.language_model.layers.6.self_attn.k_proj",
178
+ "model.language_model.layers.17.mlp.gate_proj",
179
+ "model.language_model.layers.12.self_attn.k_proj",
180
+ "model.language_model.layers.13.mlp.down_proj",
181
+ "model.language_model.layers.1.mlp.down_proj",
182
+ "model.language_model.layers.3.mlp.gate_proj",
183
+ "model.language_model.layers.14.mlp.down_proj",
184
+ "model.language_model.layers.9.mlp.up_proj",
185
+ "model.language_model.layers.21.self_attn.k_proj",
186
+ "model.language_model.layers.6.self_attn.o_proj",
187
+ "model.language_model.layers.0.self_attn.v_proj",
188
+ "model.language_model.layers.16.mlp.down_proj",
189
+ "model.language_model.layers.8.self_attn.k_proj",
190
+ "model.language_model.layers.12.mlp.gate_proj",
191
+ "model.language_model.layers.7.self_attn.o_proj",
192
+ "model.language_model.layers.18.mlp.up_proj",
193
+ "model.language_model.layers.13.mlp.up_proj",
194
+ "model.language_model.layers.16.mlp.up_proj",
195
+ "model.language_model.layers.17.self_attn.k_proj",
196
+ "model.language_model.layers.25.self_attn.k_proj",
197
+ "model.language_model.layers.8.self_attn.q_proj",
198
+ "model.language_model.layers.4.self_attn.v_proj",
199
+ "model.language_model.layers.23.self_attn.q_proj",
200
+ "model.language_model.layers.1.self_attn.o_proj",
201
+ "model.language_model.layers.5.mlp.up_proj",
202
+ "model.language_model.layers.13.self_attn.k_proj",
203
+ "model.language_model.layers.7.self_attn.k_proj",
204
+ "model.language_model.layers.22.self_attn.o_proj",
205
+ "model.language_model.layers.22.mlp.up_proj",
206
+ "model.language_model.layers.16.self_attn.o_proj",
207
+ "model.language_model.layers.24.self_attn.q_proj",
208
+ "model.language_model.layers.12.self_attn.q_proj",
209
+ "model.language_model.layers.2.self_attn.v_proj",
210
+ "model.language_model.layers.12.self_attn.v_proj",
211
+ "model.language_model.layers.13.mlp.gate_proj",
212
+ "model.language_model.layers.12.mlp.down_proj",
213
+ "model.language_model.layers.14.self_attn.q_proj",
214
+ "model.language_model.layers.26.self_attn.v_proj",
215
+ "model.language_model.layers.28.mlp.up_proj",
216
+ "model.language_model.layers.19.mlp.up_proj",
217
+ "model.language_model.layers.16.mlp.gate_proj",
218
+ "model.language_model.layers.7.self_attn.v_proj",
219
+ "model.language_model.layers.25.mlp.gate_proj",
220
+ "model.language_model.layers.13.self_attn.v_proj",
221
+ "model.language_model.layers.20.self_attn.q_proj",
222
+ "model.language_model.layers.5.mlp.gate_proj",
223
+ "model.language_model.layers.1.self_attn.q_proj",
224
+ "model.language_model.layers.11.mlp.down_proj",
225
+ "model.language_model.layers.0.self_attn.k_proj",
226
+ "model.language_model.layers.21.self_attn.v_proj",
227
+ "model.language_model.layers.28.self_attn.q_proj",
228
+ "model.language_model.layers.29.self_attn.o_proj",
229
+ "model.language_model.layers.11.self_attn.k_proj",
230
+ "model.language_model.layers.29.mlp.down_proj",
231
+ "model.language_model.layers.7.mlp.up_proj",
232
+ "model.language_model.layers.22.mlp.down_proj",
233
+ "model.language_model.layers.20.self_attn.o_proj",
234
+ "model.language_model.layers.3.self_attn.o_proj",
235
+ "model.language_model.layers.23.mlp.down_proj",
236
+ "model.language_model.layers.16.self_attn.v_proj",
237
+ "model.language_model.layers.28.mlp.gate_proj"
238
+ ],
239
+ "target_parameters": null,
240
+ "task_type": "CAUSAL_LM",
241
+ "trainable_token_indices": null,
242
+ "use_bdlora": null,
243
+ "use_dora": false,
244
+ "use_qalora": false,
245
+ "use_rslora": false
246
+ }
checkpoint-121/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75c05a4dbd6f957da19a4fcd5fa768f553c1cf5fa86a54f286530b8f94bd4e89
3
+ size 37232104
checkpoint-121/chat_template.jinja ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- set add_comma = false -%}
6
+ {%- if key not in standard_keys -%}
7
+ {%- if ns.found_first %},{% endif -%}
8
+ {%- set ns.found_first = true -%}
9
+ {{ key }}:{
10
+ {%- if value['description'] -%}
11
+ description:<|"|>{{ value['description'] }}<|"|>
12
+ {%- set add_comma = true -%}
13
+ {%- endif -%}
14
+ {%- if value['nullable'] %}
15
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
16
+ nullable:true
17
+ {%- endif -%}
18
+ {%- if value['type'] | upper == 'STRING' -%}
19
+ {%- if value['enum'] -%}
20
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
21
+ enum:{{ format_argument(value['enum']) }}
22
+ {%- endif -%}
23
+ {%- elif value['type'] | upper == 'OBJECT' -%}
24
+ ,properties:{
25
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
26
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
27
+ {%- elif value is mapping -%}
28
+ {{- format_parameters(value, value['required'] | default([])) -}}
29
+ {%- endif -%}
30
+ }
31
+ {%- if value['required'] -%}
32
+ ,required:[
33
+ {%- for item in value['required'] | default([]) -%}
34
+ <|"|>{{- item -}}<|"|>
35
+ {%- if not loop.last %},{% endif -%}
36
+ {%- endfor -%}
37
+ ]
38
+ {%- endif -%}
39
+ {%- elif value['type'] | upper == 'ARRAY' -%}
40
+ {%- if value['items'] is mapping and value['items'] -%}
41
+ ,items:{
42
+ {%- set ns_items = namespace(found_first=false) -%}
43
+ {%- for item_key, item_value in value['items'] | dictsort -%}
44
+ {%- if item_value is not none -%}
45
+ {%- if ns_items.found_first %},{% endif -%}
46
+ {%- set ns_items.found_first = true -%}
47
+ {%- if item_key == 'properties' -%}
48
+ properties:{
49
+ {%- if item_value is mapping -%}
50
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
51
+ {%- endif -%}
52
+ }
53
+ {%- elif item_key == 'required' -%}
54
+ required:[
55
+ {%- for req_item in item_value -%}
56
+ <|"|>{{- req_item -}}<|"|>
57
+ {%- if not loop.last %},{% endif -%}
58
+ {%- endfor -%}
59
+ ]
60
+ {%- elif item_key == 'type' -%}
61
+ {%- if item_value is string -%}
62
+ type:{{ format_argument(item_value | upper) }}
63
+ {%- else -%}
64
+ type:{{ format_argument(item_value | map('upper') | list) }}
65
+ {%- endif -%}
66
+ {%- else -%}
67
+ {{ item_key }}:{{ format_argument(item_value) }}
68
+ {%- endif -%}
69
+ {%- endif -%}
70
+ {%- endfor -%}
71
+ }
72
+ {%- endif -%}
73
+ {%- endif -%}
74
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
75
+ type:<|"|>{{ value['type'] | upper }}<|"|>}
76
+ {%- endif -%}
77
+ {%- endfor -%}
78
+ {%- endmacro -%}
79
+ {%- macro format_function_declaration(tool_data) -%}
80
+ declaration:{{- tool_data['function']['name'] -}}{description:<|"|>{{- tool_data['function']['description'] -}}<|"|>
81
+ {%- set params = tool_data['function']['parameters'] -%}
82
+ {%- if params -%}
83
+ ,parameters:{
84
+ {%- if params['properties'] -%}
85
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
86
+ {%- endif -%}
87
+ {%- if params['required'] -%}
88
+ required:[
89
+ {%- for item in params['required'] -%}
90
+ <|"|>{{- item -}}<|"|>
91
+ {{- ',' if not loop.last -}}
92
+ {%- endfor -%}
93
+ ],
94
+ {%- endif -%}
95
+ {%- if params['type'] -%}
96
+ type:<|"|>{{- params['type'] | upper -}}<|"|>}
97
+ {%- endif -%}
98
+ {%- endif -%}
99
+ {%- if 'response' in tool_data['function'] -%}
100
+ {%- set response_declaration = tool_data['function']['response'] -%}
101
+ ,response:{
102
+ {%- if response_declaration['description'] -%}
103
+ description:<|"|>{{- response_declaration['description'] -}}<|"|>,
104
+ {%- endif -%}
105
+ {%- if response_declaration['type'] | upper == 'OBJECT' -%}
106
+ type:<|"|>{{- response_declaration['type'] | upper -}}<|"|>}
107
+ {%- endif -%}
108
+ {%- endif -%}
109
+ }
110
+ {%- endmacro -%}
111
+ {%- macro format_argument(argument, escape_keys=True) -%}
112
+ {%- if argument is string -%}
113
+ {{- '<|"|>' + argument + '<|"|>' -}}
114
+ {%- elif argument is boolean -%}
115
+ {{- 'true' if argument else 'false' -}}
116
+ {%- elif argument is mapping -%}
117
+ {{- '{' -}}
118
+ {%- set ns = namespace(found_first=false) -%}
119
+ {%- for key, value in argument | dictsort -%}
120
+ {%- if ns.found_first %},{% endif -%}
121
+ {%- set ns.found_first = true -%}
122
+ {%- if escape_keys -%}
123
+ {{- '<|"|>' + key + '<|"|>' -}}
124
+ {%- else -%}
125
+ {{- key -}}
126
+ {%- endif -%}
127
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
128
+ {%- endfor -%}
129
+ {{- '}' -}}
130
+ {%- elif argument is sequence -%}
131
+ {{- '[' -}}
132
+ {%- for item in argument -%}
133
+ {{- format_argument(item, escape_keys=escape_keys) -}}
134
+ {%- if not loop.last %},{% endif -%}
135
+ {%- endfor -%}
136
+ {{- ']' -}}
137
+ {%- else -%}
138
+ {{- argument -}}
139
+ {%- endif -%}
140
+ {%- endmacro -%}
141
+ {%- macro strip_thinking(text) -%}
142
+ {%- set ns = namespace(result='') -%}
143
+ {%- for part in text.split('<channel|>') -%}
144
+ {%- if '<|channel>' in part -%}
145
+ {%- set ns.result = ns.result + part.split('<|channel>')[0] -%}
146
+ {%- else -%}
147
+ {%- set ns.result = ns.result + part -%}
148
+ {%- endif -%}
149
+ {%- endfor -%}
150
+ {{- ns.result | trim -}}
151
+ {%- endmacro -%}
152
+
153
+ {%- set ns = namespace(prev_message_type=None) -%}
154
+ {%- set loop_messages = messages -%}
155
+ {{ bos_token }}
156
+ {#- Handle System/Tool Definitions Block -#}
157
+ {%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}
158
+ {{- '<|turn>system\n' -}}
159
+
160
+ {#- Inject Thinking token at the very top of the FIRST system turn -#}
161
+ {%- if enable_thinking is defined and enable_thinking -%}
162
+ {{- '<|think|>' -}}
163
+ {%- set ns.prev_message_type = 'think' -%}
164
+ {%- endif -%}
165
+
166
+ {%- if messages[0]['role'] in ['system', 'developer'] -%}
167
+ {{- messages[0]['content'] | trim -}}
168
+ {%- set loop_messages = messages[1:] -%}
169
+ {%- endif -%}
170
+
171
+ {%- if tools -%}
172
+ {%- for tool in tools %}
173
+ {{- '<|tool>' -}}
174
+ {{- format_function_declaration(tool) | trim -}}
175
+ {{- '<tool|>' -}}
176
+ {%- endfor %}
177
+ {%- set ns.prev_message_type = 'tool' -%}
178
+ {%- endif -%}
179
+
180
+ {{- '<turn|>\n' -}}
181
+ {%- endif %}
182
+
183
+ {#- Loop through messages -#}
184
+ {%- for message in loop_messages -%}
185
+ {%- set ns.prev_message_type = None -%}
186
+ {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}
187
+ {{- '<|turn>' + role + '\n' }}
188
+
189
+ {%- if message['tool_calls'] -%}
190
+ {%- for tool_call in message['tool_calls'] -%}
191
+ {%- set function = tool_call['function'] -%}
192
+ {{- '<|tool_call>call:' + function['name'] + '{' -}}
193
+ {%- if function['arguments'] is mapping -%}
194
+ {%- set ns_args = namespace(found_first=false) -%}
195
+ {%- for key, value in function['arguments'] | dictsort -%}
196
+ {%- if ns_args.found_first %},{% endif -%}
197
+ {%- set ns_args.found_first = true -%}
198
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
199
+ {%- endfor -%}
200
+ {%- elif function['arguments'] is string -%}
201
+ {{- function['arguments'] -}}
202
+ {%- endif -%}
203
+ {{- '}<tool_call|>' -}}
204
+ {%- endfor -%}
205
+ {%- set ns.prev_message_type = 'tool_call' -%}
206
+ {%- endif -%}
207
+
208
+ {%- if message['tool_responses'] -%}
209
+ {#- Tool Response handling -#}
210
+ {%- for tool_response in message['tool_responses'] -%}
211
+ {{- '<|tool_response>' -}}
212
+ {%- if tool_response['response'] is mapping -%}
213
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{' -}}
214
+ {%- for key, value in tool_response['response'] | dictsort -%}
215
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
216
+ {%- if not loop.last %},{% endif -%}
217
+ {%- endfor -%}
218
+ {{- '}' -}}
219
+ {%- else -%}
220
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{value:' + format_argument(tool_response['response'], escape_keys=False) + '}' -}}
221
+ {%- endif -%}
222
+ {{- '<tool_response|>' -}}
223
+ {%- endfor -%}
224
+ {%- set ns.prev_message_type = 'tool_response' -%}
225
+ {%- endif -%}
226
+
227
+ {%- if message['content'] is string -%}
228
+ {%- if role == 'model' -%}
229
+ {{- strip_thinking(message['content']) -}}
230
+ {%- else -%}
231
+ {{- message['content'] | trim -}}
232
+ {%- endif -%}
233
+ {%- elif message['content'] is sequence -%}
234
+ {%- for item in message['content'] -%}
235
+ {%- if item['type'] == 'text' -%}
236
+ {%- if role == 'model' -%}
237
+ {{- strip_thinking(item['text']) -}}
238
+ {%- else -%}
239
+ {{- item['text'] | trim -}}
240
+ {%- endif -%}
241
+ {%- elif item['type'] == 'image' -%}
242
+ {{- '\n\n<|image|>\n\n' -}}
243
+ {%- set ns.prev_message_type = 'image' -%}
244
+ {%- elif item['type'] == 'audio' -%}
245
+ {{- '<|audio|>' -}}
246
+ {%- set ns.prev_message_type = 'audio' -%}
247
+ {%- elif item['type'] == 'video' -%}
248
+ {{- '\n\n<|video|>\n\n' -}}
249
+ {%- set ns.prev_message_type = 'video' -%}
250
+ {%- endif -%}
251
+ {%- endfor -%}
252
+ {%- endif -%}
253
+
254
+ {%- if not (message['tool_responses'] and not message['content']) -%}
255
+ {{- '<turn|>\n' -}}
256
+ {%- endif -%}
257
+ {%- endfor -%}
258
+
259
+ {%- if add_generation_prompt -%}
260
+ {%- if ns.prev_message_type != 'tool_response' -%}
261
+ {{- '<|turn>model\n' -}}
262
+ {%- endif -%}
263
+ {%- if not enable_thinking | default(false) -%}
264
+ {{- '<|channel>thought\n<channel|>' -}}
265
+ {%- endif -%}
266
+ {%- endif -%}
checkpoint-121/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93915687bf2358b3909212d14e3049eb99e10df082b649515b609996d0f43a0d
3
+ size 38229709
checkpoint-121/processor_config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_ms_per_token": 40,
3
+ "audio_seq_length": 750,
4
+ "feature_extractor": {
5
+ "dither": 0.0,
6
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
7
+ "feature_size": 128,
8
+ "fft_length": 512,
9
+ "fft_overdrive": false,
10
+ "frame_length": 320,
11
+ "hop_length": 160,
12
+ "input_scale_factor": 1.0,
13
+ "max_frequency": 8000.0,
14
+ "mel_floor": 0.001,
15
+ "min_frequency": 0.0,
16
+ "padding_side": "right",
17
+ "padding_value": 0.0,
18
+ "per_bin_mean": null,
19
+ "per_bin_stddev": null,
20
+ "preemphasis": 0.0,
21
+ "preemphasis_htk_flavor": true,
22
+ "return_attention_mask": true,
23
+ "sampling_rate": 16000
24
+ },
25
+ "image_processor": {
26
+ "do_convert_rgb": true,
27
+ "do_normalize": false,
28
+ "do_rescale": true,
29
+ "do_resize": true,
30
+ "image_mean": [
31
+ 0.0,
32
+ 0.0,
33
+ 0.0
34
+ ],
35
+ "image_processor_type": "Gemma4ImageProcessor",
36
+ "image_seq_length": 280,
37
+ "image_std": [
38
+ 1.0,
39
+ 1.0,
40
+ 1.0
41
+ ],
42
+ "max_soft_tokens": 280,
43
+ "patch_size": 16,
44
+ "pooling_kernel_size": 3,
45
+ "resample": 3,
46
+ "rescale_factor": 0.00392156862745098
47
+ },
48
+ "image_seq_length": 280,
49
+ "processor_class": "Gemma4Processor",
50
+ "video_processor": {
51
+ "do_convert_rgb": true,
52
+ "do_normalize": true,
53
+ "do_rescale": true,
54
+ "do_resize": true,
55
+ "do_sample_frames": true,
56
+ "image_mean": [
57
+ 0.0,
58
+ 0.0,
59
+ 0.0
60
+ ],
61
+ "image_std": [
62
+ 1.0,
63
+ 1.0,
64
+ 1.0
65
+ ],
66
+ "max_soft_tokens": 70,
67
+ "num_frames": 32,
68
+ "patch_size": 16,
69
+ "pooling_kernel_size": 3,
70
+ "resample": 3,
71
+ "rescale_factor": 0.00392156862745098,
72
+ "return_metadata": false,
73
+ "video_processor_type": "Gemma4VideoProcessor"
74
+ }
75
+ }
checkpoint-121/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:959e8e9e42ca24ad2d2375468311a443a85029f21f69e8aeecbbf05f12d75103
3
+ size 14645
checkpoint-121/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6be25e5f07aa9f30e24f822ea2c4935aeae0eb0c636c45fdbb3d908d7c804c2b
3
+ size 1465
checkpoint-121/tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2619fe11b50dbed06ac443c51d757b354d0b62d64baa514404d4e84e6713519
3
+ size 32169780
checkpoint-121/tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [
17
+ "<|video|>"
18
+ ],
19
+ "image_token": "<|image|>",
20
+ "is_local": false,
21
+ "mask_token": "<mask>",
22
+ "model_max_length": 1000000000000000019884624838656,
23
+ "model_specific_special_tokens": {
24
+ "audio_token": "<|audio|>",
25
+ "boa_token": "<|audio>",
26
+ "boi_token": "<|image>",
27
+ "eoa_token": "<audio|>",
28
+ "eoc_token": "<channel|>",
29
+ "eoi_token": "<image|>",
30
+ "eot_token": "<turn|>",
31
+ "escape_token": "<|\"|>",
32
+ "etc_token": "<tool_call|>",
33
+ "etd_token": "<tool|>",
34
+ "etr_token": "<tool_response|>",
35
+ "image_token": "<|image|>",
36
+ "soc_token": "<|channel>",
37
+ "sot_token": "<|turn>",
38
+ "stc_token": "<|tool_call>",
39
+ "std_token": "<|tool>",
40
+ "str_token": "<|tool_response>",
41
+ "think_token": "<|think|>"
42
+ },
43
+ "pad_token": "<pad>",
44
+ "padding_side": "left",
45
+ "processor_class": "Gemma4Processor",
46
+ "response_schema": {
47
+ "properties": {
48
+ "content": {
49
+ "type": "string"
50
+ },
51
+ "role": {
52
+ "const": "assistant"
53
+ },
54
+ "thinking": {
55
+ "type": "string"
56
+ },
57
+ "tool_calls": {
58
+ "items": {
59
+ "properties": {
60
+ "function": {
61
+ "properties": {
62
+ "arguments": {
63
+ "additionalProperties": {},
64
+ "type": "object",
65
+ "x-parser": "gemma4-tool-call"
66
+ },
67
+ "name": {
68
+ "type": "string"
69
+ }
70
+ },
71
+ "type": "object",
72
+ "x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})"
73
+ },
74
+ "type": {
75
+ "const": "function"
76
+ }
77
+ },
78
+ "type": "object"
79
+ },
80
+ "type": "array",
81
+ "x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>"
82
+ }
83
+ },
84
+ "type": "object",
85
+ "x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
86
+ },
87
+ "soc_token": "<|channel>",
88
+ "sot_token": "<|turn>",
89
+ "stc_token": "<|tool_call>",
90
+ "std_token": "<|tool>",
91
+ "str_token": "<|tool_response>",
92
+ "think_token": "<|think|>",
93
+ "tokenizer_class": "GemmaTokenizer",
94
+ "unk_token": "<unk>"
95
+ }
checkpoint-121/trainer_state.json ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 121,
3
+ "best_metric": 0.6695265769958496,
4
+ "best_model_checkpoint": "/home/plucky/ml-workspace/models/gemma4-26b-securecode/checkpoint-121",
5
+ "epoch": 1.0,
6
+ "eval_steps": 500,
7
+ "global_step": 121,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 1.1113492242991925,
14
+ "epoch": 0.0827300930713547,
15
+ "grad_norm": 10.3125,
16
+ "learning_rate": 1.8e-05,
17
+ "loss": 93.48836059570313,
18
+ "mean_token_accuracy": 0.4107020549476147,
19
+ "num_tokens": 81920.0,
20
+ "step": 10
21
+ },
22
+ {
23
+ "entropy": 0.8875315530225635,
24
+ "epoch": 0.1654601861427094,
25
+ "grad_norm": 6.15625,
26
+ "learning_rate": 3.8e-05,
27
+ "loss": 67.76697998046875,
28
+ "mean_token_accuracy": 0.5182974558323622,
29
+ "num_tokens": 163840.0,
30
+ "step": 20
31
+ },
32
+ {
33
+ "entropy": 0.673606987670064,
34
+ "epoch": 0.2481902792140641,
35
+ "grad_norm": 2.421875,
36
+ "learning_rate": 5.8e-05,
37
+ "loss": 37.221334838867186,
38
+ "mean_token_accuracy": 0.6476027386263012,
39
+ "num_tokens": 245760.0,
40
+ "step": 30
41
+ },
42
+ {
43
+ "entropy": 1.0845661748200655,
44
+ "epoch": 0.3309203722854188,
45
+ "grad_norm": 1.3671875,
46
+ "learning_rate": 7.800000000000001e-05,
47
+ "loss": 22.017848205566406,
48
+ "mean_token_accuracy": 0.7083170266821981,
49
+ "num_tokens": 327680.0,
50
+ "step": 40
51
+ },
52
+ {
53
+ "entropy": 1.1636322166770696,
54
+ "epoch": 0.4136504653567735,
55
+ "grad_norm": 0.703125,
56
+ "learning_rate": 9.8e-05,
57
+ "loss": 17.47879638671875,
58
+ "mean_token_accuracy": 0.7332558700814843,
59
+ "num_tokens": 409600.0,
60
+ "step": 50
61
+ },
62
+ {
63
+ "entropy": 0.9551631901413202,
64
+ "epoch": 0.4963805584281282,
65
+ "grad_norm": 0.40625,
66
+ "learning_rate": 0.000118,
67
+ "loss": 15.09481201171875,
68
+ "mean_token_accuracy": 0.7555772982537746,
69
+ "num_tokens": 491520.0,
70
+ "step": 60
71
+ },
72
+ {
73
+ "entropy": 0.8048430571332574,
74
+ "epoch": 0.5791106514994829,
75
+ "grad_norm": 0.375,
76
+ "learning_rate": 0.000138,
77
+ "loss": 13.297686767578124,
78
+ "mean_token_accuracy": 0.7774828754365444,
79
+ "num_tokens": 573440.0,
80
+ "step": 70
81
+ },
82
+ {
83
+ "entropy": 0.8100443260744215,
84
+ "epoch": 0.6618407445708376,
85
+ "grad_norm": 0.4609375,
86
+ "learning_rate": 0.00015800000000000002,
87
+ "loss": 12.752572631835937,
88
+ "mean_token_accuracy": 0.7837084107100963,
89
+ "num_tokens": 655360.0,
90
+ "step": 80
91
+ },
92
+ {
93
+ "entropy": 0.7172152267768979,
94
+ "epoch": 0.7445708376421923,
95
+ "grad_norm": 2.1875,
96
+ "learning_rate": 0.00017800000000000002,
97
+ "loss": 11.629959106445312,
98
+ "mean_token_accuracy": 0.799449609220028,
99
+ "num_tokens": 737280.0,
100
+ "step": 90
101
+ },
102
+ {
103
+ "entropy": 0.7284062243998051,
104
+ "epoch": 0.827300930713547,
105
+ "grad_norm": 0.40625,
106
+ "learning_rate": 0.00019800000000000002,
107
+ "loss": 11.506278991699219,
108
+ "mean_token_accuracy": 0.8022871781140566,
109
+ "num_tokens": 819200.0,
110
+ "step": 100
111
+ },
112
+ {
113
+ "entropy": 0.6922262106090784,
114
+ "epoch": 0.9100310237849017,
115
+ "grad_norm": 0.341796875,
116
+ "learning_rate": 0.00019942266891397815,
117
+ "loss": 11.149666595458985,
118
+ "mean_token_accuracy": 0.8068982377648354,
119
+ "num_tokens": 901120.0,
120
+ "step": 110
121
+ },
122
+ {
123
+ "entropy": 0.6608987387269736,
124
+ "epoch": 0.9927611168562565,
125
+ "grad_norm": 0.373046875,
126
+ "learning_rate": 0.00019743551343638324,
127
+ "loss": 10.666960906982421,
128
+ "mean_token_accuracy": 0.8124388422816992,
129
+ "num_tokens": 983040.0,
130
+ "step": 120
131
+ },
132
+ {
133
+ "epoch": 1.0,
134
+ "eval_entropy": 0.6862195637336997,
135
+ "eval_loss": 0.6695265769958496,
136
+ "eval_mean_token_accuracy": 0.8135074851124786,
137
+ "eval_num_tokens": 990208.0,
138
+ "eval_runtime": 255.0413,
139
+ "eval_samples_per_second": 0.843,
140
+ "eval_steps_per_second": 0.843,
141
+ "step": 121
142
+ }
143
+ ],
144
+ "logging_steps": 10,
145
+ "max_steps": 363,
146
+ "num_input_tokens_seen": 0,
147
+ "num_train_epochs": 3,
148
+ "save_steps": 500,
149
+ "stateful_callbacks": {
150
+ "TrainerControl": {
151
+ "args": {
152
+ "should_epoch_stop": false,
153
+ "should_evaluate": false,
154
+ "should_log": false,
155
+ "should_save": true,
156
+ "should_training_stop": false
157
+ },
158
+ "attributes": {}
159
+ }
160
+ },
161
+ "total_flos": 1.4904406021973606e+17,
162
+ "train_batch_size": 1,
163
+ "trial_name": null,
164
+ "trial_params": null
165
+ }
checkpoint-121/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eaf4e1eba101412810b250e27914b2df87f93b0a9c62028451f50813e692b8e
3
+ size 5713
checkpoint-242/README.md ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: google/gemma-4-26b-a4b-it
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:google/gemma-4-26b-a4b-it
7
+ - lora
8
+ - sft
9
+ - transformers
10
+ - trl
11
+ ---
12
+
13
+ # Model Card for Model ID
14
+
15
+ <!-- Provide a quick summary of what the model is/does. -->
16
+
17
+
18
+
19
+ ## Model Details
20
+
21
+ ### Model Description
22
+
23
+ <!-- Provide a longer summary of what this model is. -->
24
+
25
+
26
+
27
+ - **Developed by:** [More Information Needed]
28
+ - **Funded by [optional]:** [More Information Needed]
29
+ - **Shared by [optional]:** [More Information Needed]
30
+ - **Model type:** [More Information Needed]
31
+ - **Language(s) (NLP):** [More Information Needed]
32
+ - **License:** [More Information Needed]
33
+ - **Finetuned from model [optional]:** [More Information Needed]
34
+
35
+ ### Model Sources [optional]
36
+
37
+ <!-- Provide the basic links for the model. -->
38
+
39
+ - **Repository:** [More Information Needed]
40
+ - **Paper [optional]:** [More Information Needed]
41
+ - **Demo [optional]:** [More Information Needed]
42
+
43
+ ## Uses
44
+
45
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
46
+
47
+ ### Direct Use
48
+
49
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
50
+
51
+ [More Information Needed]
52
+
53
+ ### Downstream Use [optional]
54
+
55
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
56
+
57
+ [More Information Needed]
58
+
59
+ ### Out-of-Scope Use
60
+
61
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
62
+
63
+ [More Information Needed]
64
+
65
+ ## Bias, Risks, and Limitations
66
+
67
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
68
+
69
+ [More Information Needed]
70
+
71
+ ### Recommendations
72
+
73
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
74
+
75
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
76
+
77
+ ## How to Get Started with the Model
78
+
79
+ Use the code below to get started with the model.
80
+
81
+ [More Information Needed]
82
+
83
+ ## Training Details
84
+
85
+ ### Training Data
86
+
87
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
88
+
89
+ [More Information Needed]
90
+
91
+ ### Training Procedure
92
+
93
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
94
+
95
+ #### Preprocessing [optional]
96
+
97
+ [More Information Needed]
98
+
99
+
100
+ #### Training Hyperparameters
101
+
102
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
103
+
104
+ #### Speeds, Sizes, Times [optional]
105
+
106
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
107
+
108
+ [More Information Needed]
109
+
110
+ ## Evaluation
111
+
112
+ <!-- This section describes the evaluation protocols and provides the results. -->
113
+
114
+ ### Testing Data, Factors & Metrics
115
+
116
+ #### Testing Data
117
+
118
+ <!-- This should link to a Dataset Card if possible. -->
119
+
120
+ [More Information Needed]
121
+
122
+ #### Factors
123
+
124
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
125
+
126
+ [More Information Needed]
127
+
128
+ #### Metrics
129
+
130
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
131
+
132
+ [More Information Needed]
133
+
134
+ ### Results
135
+
136
+ [More Information Needed]
137
+
138
+ #### Summary
139
+
140
+
141
+
142
+ ## Model Examination [optional]
143
+
144
+ <!-- Relevant interpretability work for the model goes here -->
145
+
146
+ [More Information Needed]
147
+
148
+ ## Environmental Impact
149
+
150
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
151
+
152
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
153
+
154
+ - **Hardware Type:** [More Information Needed]
155
+ - **Hours used:** [More Information Needed]
156
+ - **Cloud Provider:** [More Information Needed]
157
+ - **Compute Region:** [More Information Needed]
158
+ - **Carbon Emitted:** [More Information Needed]
159
+
160
+ ## Technical Specifications [optional]
161
+
162
+ ### Model Architecture and Objective
163
+
164
+ [More Information Needed]
165
+
166
+ ### Compute Infrastructure
167
+
168
+ [More Information Needed]
169
+
170
+ #### Hardware
171
+
172
+ [More Information Needed]
173
+
174
+ #### Software
175
+
176
+ [More Information Needed]
177
+
178
+ ## Citation [optional]
179
+
180
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
181
+
182
+ **BibTeX:**
183
+
184
+ [More Information Needed]
185
+
186
+ **APA:**
187
+
188
+ [More Information Needed]
189
+
190
+ ## Glossary [optional]
191
+
192
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
193
+
194
+ [More Information Needed]
195
+
196
+ ## More Information [optional]
197
+
198
+ [More Information Needed]
199
+
200
+ ## Model Card Authors [optional]
201
+
202
+ [More Information Needed]
203
+
204
+ ## Model Card Contact
205
+
206
+ [More Information Needed]
207
+ ### Framework versions
208
+
209
+ - PEFT 0.18.2.dev0
checkpoint-242/adapter_config.json ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "google/gemma-4-26b-a4b-it",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "lora_ga_config": null,
23
+ "megatron_config": null,
24
+ "megatron_core": "megatron.core",
25
+ "modules_to_save": null,
26
+ "peft_type": "LORA",
27
+ "peft_version": "0.18.2.dev0@e7355a3b2233820f6f30e558ce133ed22673a087",
28
+ "qalora_group_size": 16,
29
+ "r": 16,
30
+ "rank_pattern": {},
31
+ "revision": null,
32
+ "target_modules": [
33
+ "model.language_model.layers.4.self_attn.k_proj",
34
+ "model.language_model.layers.17.self_attn.o_proj",
35
+ "model.language_model.layers.3.mlp.up_proj",
36
+ "model.language_model.layers.17.mlp.up_proj",
37
+ "model.language_model.layers.8.mlp.down_proj",
38
+ "model.language_model.layers.27.self_attn.k_proj",
39
+ "model.language_model.layers.28.mlp.down_proj",
40
+ "model.language_model.layers.6.mlp.up_proj",
41
+ "model.language_model.layers.24.self_attn.k_proj",
42
+ "model.language_model.layers.6.self_attn.q_proj",
43
+ "model.language_model.layers.17.self_attn.q_proj",
44
+ "model.language_model.layers.15.self_attn.k_proj",
45
+ "model.language_model.layers.24.mlp.up_proj",
46
+ "model.language_model.layers.19.mlp.gate_proj",
47
+ "model.language_model.layers.16.self_attn.k_proj",
48
+ "model.language_model.layers.26.self_attn.q_proj",
49
+ "model.language_model.layers.21.mlp.up_proj",
50
+ "model.language_model.layers.17.mlp.down_proj",
51
+ "model.language_model.layers.10.self_attn.v_proj",
52
+ "model.language_model.layers.25.mlp.down_proj",
53
+ "model.language_model.layers.11.mlp.up_proj",
54
+ "model.language_model.layers.2.self_attn.o_proj",
55
+ "model.language_model.layers.15.mlp.down_proj",
56
+ "model.language_model.layers.10.self_attn.k_proj",
57
+ "model.language_model.layers.15.self_attn.q_proj",
58
+ "model.language_model.layers.9.self_attn.v_proj",
59
+ "model.language_model.layers.27.self_attn.o_proj",
60
+ "model.language_model.layers.3.self_attn.v_proj",
61
+ "model.language_model.layers.10.self_attn.q_proj",
62
+ "model.language_model.layers.21.mlp.gate_proj",
63
+ "model.language_model.layers.25.self_attn.q_proj",
64
+ "model.language_model.layers.5.self_attn.o_proj",
65
+ "model.language_model.layers.2.mlp.gate_proj",
66
+ "model.language_model.layers.9.mlp.gate_proj",
67
+ "model.language_model.layers.19.self_attn.v_proj",
68
+ "model.language_model.layers.18.self_attn.k_proj",
69
+ "model.language_model.layers.19.mlp.down_proj",
70
+ "model.language_model.layers.23.self_attn.o_proj",
71
+ "model.language_model.layers.27.mlp.gate_proj",
72
+ "model.language_model.layers.0.mlp.up_proj",
73
+ "model.language_model.layers.20.mlp.gate_proj",
74
+ "model.language_model.layers.28.self_attn.o_proj",
75
+ "model.language_model.layers.4.self_attn.o_proj",
76
+ "model.language_model.layers.28.self_attn.v_proj",
77
+ "model.language_model.layers.11.self_attn.q_proj",
78
+ "model.language_model.layers.26.self_attn.o_proj",
79
+ "model.language_model.layers.9.mlp.down_proj",
80
+ "model.language_model.layers.27.self_attn.v_proj",
81
+ "model.language_model.layers.23.mlp.up_proj",
82
+ "model.language_model.layers.2.mlp.up_proj",
83
+ "model.language_model.layers.0.mlp.gate_proj",
84
+ "model.language_model.layers.18.self_attn.o_proj",
85
+ "model.language_model.layers.19.self_attn.k_proj",
86
+ "model.language_model.layers.10.mlp.down_proj",
87
+ "model.language_model.layers.10.mlp.gate_proj",
88
+ "model.language_model.layers.0.self_attn.o_proj",
89
+ "model.language_model.layers.20.mlp.down_proj",
90
+ "model.language_model.layers.10.self_attn.o_proj",
91
+ "model.language_model.layers.15.self_attn.o_proj",
92
+ "model.language_model.layers.18.mlp.down_proj",
93
+ "model.language_model.layers.1.self_attn.v_proj",
94
+ "model.language_model.layers.13.self_attn.q_proj",
95
+ "model.language_model.layers.18.self_attn.q_proj",
96
+ "model.language_model.layers.3.mlp.down_proj",
97
+ "model.language_model.layers.20.self_attn.k_proj",
98
+ "model.language_model.layers.14.self_attn.o_proj",
99
+ "model.language_model.layers.7.mlp.down_proj",
100
+ "model.language_model.layers.25.self_attn.v_proj",
101
+ "model.language_model.layers.29.mlp.gate_proj",
102
+ "model.language_model.layers.2.self_attn.k_proj",
103
+ "model.language_model.layers.5.self_attn.k_proj",
104
+ "model.language_model.layers.9.self_attn.k_proj",
105
+ "model.language_model.layers.1.mlp.gate_proj",
106
+ "model.language_model.layers.8.self_attn.o_proj",
107
+ "model.language_model.layers.22.self_attn.k_proj",
108
+ "model.language_model.layers.3.self_attn.q_proj",
109
+ "model.language_model.layers.23.self_attn.k_proj",
110
+ "model.language_model.layers.3.self_attn.k_proj",
111
+ "model.language_model.layers.19.self_attn.q_proj",
112
+ "model.language_model.layers.18.self_attn.v_proj",
113
+ "model.language_model.layers.10.mlp.up_proj",
114
+ "model.language_model.layers.11.mlp.gate_proj",
115
+ "model.language_model.layers.1.mlp.up_proj",
116
+ "model.language_model.layers.18.mlp.gate_proj",
117
+ "model.language_model.layers.8.mlp.gate_proj",
118
+ "model.language_model.layers.7.mlp.gate_proj",
119
+ "model.language_model.layers.8.mlp.up_proj",
120
+ "model.language_model.layers.5.self_attn.q_proj",
121
+ "model.language_model.layers.14.self_attn.k_proj",
122
+ "model.language_model.layers.22.self_attn.q_proj",
123
+ "model.language_model.layers.4.mlp.down_proj",
124
+ "model.language_model.layers.22.mlp.gate_proj",
125
+ "model.language_model.layers.15.self_attn.v_proj",
126
+ "model.language_model.layers.21.self_attn.o_proj",
127
+ "model.language_model.layers.11.self_attn.o_proj",
128
+ "model.language_model.layers.20.mlp.up_proj",
129
+ "model.language_model.layers.16.self_attn.q_proj",
130
+ "model.language_model.layers.1.self_attn.k_proj",
131
+ "model.language_model.layers.24.mlp.gate_proj",
132
+ "model.language_model.layers.26.mlp.gate_proj",
133
+ "model.language_model.layers.2.self_attn.q_proj",
134
+ "model.language_model.layers.4.mlp.gate_proj",
135
+ "model.language_model.layers.7.self_attn.q_proj",
136
+ "model.language_model.layers.14.self_attn.v_proj",
137
+ "model.language_model.layers.27.self_attn.q_proj",
138
+ "model.language_model.layers.29.mlp.up_proj",
139
+ "model.language_model.layers.28.self_attn.k_proj",
140
+ "model.language_model.layers.24.self_attn.o_proj",
141
+ "model.language_model.layers.26.self_attn.k_proj",
142
+ "model.language_model.layers.21.mlp.down_proj",
143
+ "model.language_model.layers.14.mlp.gate_proj",
144
+ "model.language_model.layers.25.mlp.up_proj",
145
+ "model.language_model.layers.27.mlp.down_proj",
146
+ "model.language_model.layers.20.self_attn.v_proj",
147
+ "model.language_model.layers.0.mlp.down_proj",
148
+ "model.language_model.layers.6.self_attn.v_proj",
149
+ "model.language_model.layers.4.self_attn.q_proj",
150
+ "model.language_model.layers.9.self_attn.q_proj",
151
+ "model.language_model.layers.0.self_attn.q_proj",
152
+ "model.language_model.layers.27.mlp.up_proj",
153
+ "model.language_model.layers.29.self_attn.k_proj",
154
+ "model.language_model.layers.29.self_attn.q_proj",
155
+ "model.language_model.layers.12.mlp.up_proj",
156
+ "model.language_model.layers.6.mlp.down_proj",
157
+ "model.language_model.layers.2.mlp.down_proj",
158
+ "model.language_model.layers.6.mlp.gate_proj",
159
+ "model.language_model.layers.24.self_attn.v_proj",
160
+ "model.language_model.layers.4.mlp.up_proj",
161
+ "model.language_model.layers.9.self_attn.o_proj",
162
+ "model.language_model.layers.22.self_attn.v_proj",
163
+ "model.language_model.layers.23.mlp.gate_proj",
164
+ "model.language_model.layers.5.mlp.down_proj",
165
+ "model.language_model.layers.13.self_attn.o_proj",
166
+ "model.language_model.layers.14.mlp.up_proj",
167
+ "model.language_model.layers.15.mlp.gate_proj",
168
+ "model.language_model.layers.19.self_attn.o_proj",
169
+ "model.language_model.layers.24.mlp.down_proj",
170
+ "model.language_model.layers.21.self_attn.q_proj",
171
+ "model.language_model.layers.15.mlp.up_proj",
172
+ "model.language_model.layers.26.mlp.up_proj",
173
+ "model.language_model.layers.26.mlp.down_proj",
174
+ "model.language_model.layers.25.self_attn.o_proj",
175
+ "model.language_model.layers.8.self_attn.v_proj",
176
+ "model.language_model.layers.12.self_attn.o_proj",
177
+ "model.language_model.layers.6.self_attn.k_proj",
178
+ "model.language_model.layers.17.mlp.gate_proj",
179
+ "model.language_model.layers.12.self_attn.k_proj",
180
+ "model.language_model.layers.13.mlp.down_proj",
181
+ "model.language_model.layers.1.mlp.down_proj",
182
+ "model.language_model.layers.3.mlp.gate_proj",
183
+ "model.language_model.layers.14.mlp.down_proj",
184
+ "model.language_model.layers.9.mlp.up_proj",
185
+ "model.language_model.layers.21.self_attn.k_proj",
186
+ "model.language_model.layers.6.self_attn.o_proj",
187
+ "model.language_model.layers.0.self_attn.v_proj",
188
+ "model.language_model.layers.16.mlp.down_proj",
189
+ "model.language_model.layers.8.self_attn.k_proj",
190
+ "model.language_model.layers.12.mlp.gate_proj",
191
+ "model.language_model.layers.7.self_attn.o_proj",
192
+ "model.language_model.layers.18.mlp.up_proj",
193
+ "model.language_model.layers.13.mlp.up_proj",
194
+ "model.language_model.layers.16.mlp.up_proj",
195
+ "model.language_model.layers.17.self_attn.k_proj",
196
+ "model.language_model.layers.25.self_attn.k_proj",
197
+ "model.language_model.layers.8.self_attn.q_proj",
198
+ "model.language_model.layers.4.self_attn.v_proj",
199
+ "model.language_model.layers.23.self_attn.q_proj",
200
+ "model.language_model.layers.1.self_attn.o_proj",
201
+ "model.language_model.layers.5.mlp.up_proj",
202
+ "model.language_model.layers.13.self_attn.k_proj",
203
+ "model.language_model.layers.7.self_attn.k_proj",
204
+ "model.language_model.layers.22.self_attn.o_proj",
205
+ "model.language_model.layers.22.mlp.up_proj",
206
+ "model.language_model.layers.16.self_attn.o_proj",
207
+ "model.language_model.layers.24.self_attn.q_proj",
208
+ "model.language_model.layers.12.self_attn.q_proj",
209
+ "model.language_model.layers.2.self_attn.v_proj",
210
+ "model.language_model.layers.12.self_attn.v_proj",
211
+ "model.language_model.layers.13.mlp.gate_proj",
212
+ "model.language_model.layers.12.mlp.down_proj",
213
+ "model.language_model.layers.14.self_attn.q_proj",
214
+ "model.language_model.layers.26.self_attn.v_proj",
215
+ "model.language_model.layers.28.mlp.up_proj",
216
+ "model.language_model.layers.19.mlp.up_proj",
217
+ "model.language_model.layers.16.mlp.gate_proj",
218
+ "model.language_model.layers.7.self_attn.v_proj",
219
+ "model.language_model.layers.25.mlp.gate_proj",
220
+ "model.language_model.layers.13.self_attn.v_proj",
221
+ "model.language_model.layers.20.self_attn.q_proj",
222
+ "model.language_model.layers.5.mlp.gate_proj",
223
+ "model.language_model.layers.1.self_attn.q_proj",
224
+ "model.language_model.layers.11.mlp.down_proj",
225
+ "model.language_model.layers.0.self_attn.k_proj",
226
+ "model.language_model.layers.21.self_attn.v_proj",
227
+ "model.language_model.layers.28.self_attn.q_proj",
228
+ "model.language_model.layers.29.self_attn.o_proj",
229
+ "model.language_model.layers.11.self_attn.k_proj",
230
+ "model.language_model.layers.29.mlp.down_proj",
231
+ "model.language_model.layers.7.mlp.up_proj",
232
+ "model.language_model.layers.22.mlp.down_proj",
233
+ "model.language_model.layers.20.self_attn.o_proj",
234
+ "model.language_model.layers.3.self_attn.o_proj",
235
+ "model.language_model.layers.23.mlp.down_proj",
236
+ "model.language_model.layers.16.self_attn.v_proj",
237
+ "model.language_model.layers.28.mlp.gate_proj"
238
+ ],
239
+ "target_parameters": null,
240
+ "task_type": "CAUSAL_LM",
241
+ "trainable_token_indices": null,
242
+ "use_bdlora": null,
243
+ "use_dora": false,
244
+ "use_qalora": false,
245
+ "use_rslora": false
246
+ }
checkpoint-242/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6c31eb96af91247162f1fdd882f14bf908d1c3c4b7925203d58312809b5007e
3
+ size 37232104
checkpoint-242/chat_template.jinja ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- set add_comma = false -%}
6
+ {%- if key not in standard_keys -%}
7
+ {%- if ns.found_first %},{% endif -%}
8
+ {%- set ns.found_first = true -%}
9
+ {{ key }}:{
10
+ {%- if value['description'] -%}
11
+ description:<|"|>{{ value['description'] }}<|"|>
12
+ {%- set add_comma = true -%}
13
+ {%- endif -%}
14
+ {%- if value['nullable'] %}
15
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
16
+ nullable:true
17
+ {%- endif -%}
18
+ {%- if value['type'] | upper == 'STRING' -%}
19
+ {%- if value['enum'] -%}
20
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
21
+ enum:{{ format_argument(value['enum']) }}
22
+ {%- endif -%}
23
+ {%- elif value['type'] | upper == 'OBJECT' -%}
24
+ ,properties:{
25
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
26
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
27
+ {%- elif value is mapping -%}
28
+ {{- format_parameters(value, value['required'] | default([])) -}}
29
+ {%- endif -%}
30
+ }
31
+ {%- if value['required'] -%}
32
+ ,required:[
33
+ {%- for item in value['required'] | default([]) -%}
34
+ <|"|>{{- item -}}<|"|>
35
+ {%- if not loop.last %},{% endif -%}
36
+ {%- endfor -%}
37
+ ]
38
+ {%- endif -%}
39
+ {%- elif value['type'] | upper == 'ARRAY' -%}
40
+ {%- if value['items'] is mapping and value['items'] -%}
41
+ ,items:{
42
+ {%- set ns_items = namespace(found_first=false) -%}
43
+ {%- for item_key, item_value in value['items'] | dictsort -%}
44
+ {%- if item_value is not none -%}
45
+ {%- if ns_items.found_first %},{% endif -%}
46
+ {%- set ns_items.found_first = true -%}
47
+ {%- if item_key == 'properties' -%}
48
+ properties:{
49
+ {%- if item_value is mapping -%}
50
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
51
+ {%- endif -%}
52
+ }
53
+ {%- elif item_key == 'required' -%}
54
+ required:[
55
+ {%- for req_item in item_value -%}
56
+ <|"|>{{- req_item -}}<|"|>
57
+ {%- if not loop.last %},{% endif -%}
58
+ {%- endfor -%}
59
+ ]
60
+ {%- elif item_key == 'type' -%}
61
+ {%- if item_value is string -%}
62
+ type:{{ format_argument(item_value | upper) }}
63
+ {%- else -%}
64
+ type:{{ format_argument(item_value | map('upper') | list) }}
65
+ {%- endif -%}
66
+ {%- else -%}
67
+ {{ item_key }}:{{ format_argument(item_value) }}
68
+ {%- endif -%}
69
+ {%- endif -%}
70
+ {%- endfor -%}
71
+ }
72
+ {%- endif -%}
73
+ {%- endif -%}
74
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
75
+ type:<|"|>{{ value['type'] | upper }}<|"|>}
76
+ {%- endif -%}
77
+ {%- endfor -%}
78
+ {%- endmacro -%}
79
+ {%- macro format_function_declaration(tool_data) -%}
80
+ declaration:{{- tool_data['function']['name'] -}}{description:<|"|>{{- tool_data['function']['description'] -}}<|"|>
81
+ {%- set params = tool_data['function']['parameters'] -%}
82
+ {%- if params -%}
83
+ ,parameters:{
84
+ {%- if params['properties'] -%}
85
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
86
+ {%- endif -%}
87
+ {%- if params['required'] -%}
88
+ required:[
89
+ {%- for item in params['required'] -%}
90
+ <|"|>{{- item -}}<|"|>
91
+ {{- ',' if not loop.last -}}
92
+ {%- endfor -%}
93
+ ],
94
+ {%- endif -%}
95
+ {%- if params['type'] -%}
96
+ type:<|"|>{{- params['type'] | upper -}}<|"|>}
97
+ {%- endif -%}
98
+ {%- endif -%}
99
+ {%- if 'response' in tool_data['function'] -%}
100
+ {%- set response_declaration = tool_data['function']['response'] -%}
101
+ ,response:{
102
+ {%- if response_declaration['description'] -%}
103
+ description:<|"|>{{- response_declaration['description'] -}}<|"|>,
104
+ {%- endif -%}
105
+ {%- if response_declaration['type'] | upper == 'OBJECT' -%}
106
+ type:<|"|>{{- response_declaration['type'] | upper -}}<|"|>}
107
+ {%- endif -%}
108
+ {%- endif -%}
109
+ }
110
+ {%- endmacro -%}
111
+ {%- macro format_argument(argument, escape_keys=True) -%}
112
+ {%- if argument is string -%}
113
+ {{- '<|"|>' + argument + '<|"|>' -}}
114
+ {%- elif argument is boolean -%}
115
+ {{- 'true' if argument else 'false' -}}
116
+ {%- elif argument is mapping -%}
117
+ {{- '{' -}}
118
+ {%- set ns = namespace(found_first=false) -%}
119
+ {%- for key, value in argument | dictsort -%}
120
+ {%- if ns.found_first %},{% endif -%}
121
+ {%- set ns.found_first = true -%}
122
+ {%- if escape_keys -%}
123
+ {{- '<|"|>' + key + '<|"|>' -}}
124
+ {%- else -%}
125
+ {{- key -}}
126
+ {%- endif -%}
127
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
128
+ {%- endfor -%}
129
+ {{- '}' -}}
130
+ {%- elif argument is sequence -%}
131
+ {{- '[' -}}
132
+ {%- for item in argument -%}
133
+ {{- format_argument(item, escape_keys=escape_keys) -}}
134
+ {%- if not loop.last %},{% endif -%}
135
+ {%- endfor -%}
136
+ {{- ']' -}}
137
+ {%- else -%}
138
+ {{- argument -}}
139
+ {%- endif -%}
140
+ {%- endmacro -%}
141
+ {%- macro strip_thinking(text) -%}
142
+ {%- set ns = namespace(result='') -%}
143
+ {%- for part in text.split('<channel|>') -%}
144
+ {%- if '<|channel>' in part -%}
145
+ {%- set ns.result = ns.result + part.split('<|channel>')[0] -%}
146
+ {%- else -%}
147
+ {%- set ns.result = ns.result + part -%}
148
+ {%- endif -%}
149
+ {%- endfor -%}
150
+ {{- ns.result | trim -}}
151
+ {%- endmacro -%}
152
+
153
+ {%- set ns = namespace(prev_message_type=None) -%}
154
+ {%- set loop_messages = messages -%}
155
+ {{ bos_token }}
156
+ {#- Handle System/Tool Definitions Block -#}
157
+ {%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}
158
+ {{- '<|turn>system\n' -}}
159
+
160
+ {#- Inject Thinking token at the very top of the FIRST system turn -#}
161
+ {%- if enable_thinking is defined and enable_thinking -%}
162
+ {{- '<|think|>' -}}
163
+ {%- set ns.prev_message_type = 'think' -%}
164
+ {%- endif -%}
165
+
166
+ {%- if messages[0]['role'] in ['system', 'developer'] -%}
167
+ {{- messages[0]['content'] | trim -}}
168
+ {%- set loop_messages = messages[1:] -%}
169
+ {%- endif -%}
170
+
171
+ {%- if tools -%}
172
+ {%- for tool in tools %}
173
+ {{- '<|tool>' -}}
174
+ {{- format_function_declaration(tool) | trim -}}
175
+ {{- '<tool|>' -}}
176
+ {%- endfor %}
177
+ {%- set ns.prev_message_type = 'tool' -%}
178
+ {%- endif -%}
179
+
180
+ {{- '<turn|>\n' -}}
181
+ {%- endif %}
182
+
183
+ {#- Loop through messages -#}
184
+ {%- for message in loop_messages -%}
185
+ {%- set ns.prev_message_type = None -%}
186
+ {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}
187
+ {{- '<|turn>' + role + '\n' }}
188
+
189
+ {%- if message['tool_calls'] -%}
190
+ {%- for tool_call in message['tool_calls'] -%}
191
+ {%- set function = tool_call['function'] -%}
192
+ {{- '<|tool_call>call:' + function['name'] + '{' -}}
193
+ {%- if function['arguments'] is mapping -%}
194
+ {%- set ns_args = namespace(found_first=false) -%}
195
+ {%- for key, value in function['arguments'] | dictsort -%}
196
+ {%- if ns_args.found_first %},{% endif -%}
197
+ {%- set ns_args.found_first = true -%}
198
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
199
+ {%- endfor -%}
200
+ {%- elif function['arguments'] is string -%}
201
+ {{- function['arguments'] -}}
202
+ {%- endif -%}
203
+ {{- '}<tool_call|>' -}}
204
+ {%- endfor -%}
205
+ {%- set ns.prev_message_type = 'tool_call' -%}
206
+ {%- endif -%}
207
+
208
+ {%- if message['tool_responses'] -%}
209
+ {#- Tool Response handling -#}
210
+ {%- for tool_response in message['tool_responses'] -%}
211
+ {{- '<|tool_response>' -}}
212
+ {%- if tool_response['response'] is mapping -%}
213
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{' -}}
214
+ {%- for key, value in tool_response['response'] | dictsort -%}
215
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
216
+ {%- if not loop.last %},{% endif -%}
217
+ {%- endfor -%}
218
+ {{- '}' -}}
219
+ {%- else -%}
220
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{value:' + format_argument(tool_response['response'], escape_keys=False) + '}' -}}
221
+ {%- endif -%}
222
+ {{- '<tool_response|>' -}}
223
+ {%- endfor -%}
224
+ {%- set ns.prev_message_type = 'tool_response' -%}
225
+ {%- endif -%}
226
+
227
+ {%- if message['content'] is string -%}
228
+ {%- if role == 'model' -%}
229
+ {{- strip_thinking(message['content']) -}}
230
+ {%- else -%}
231
+ {{- message['content'] | trim -}}
232
+ {%- endif -%}
233
+ {%- elif message['content'] is sequence -%}
234
+ {%- for item in message['content'] -%}
235
+ {%- if item['type'] == 'text' -%}
236
+ {%- if role == 'model' -%}
237
+ {{- strip_thinking(item['text']) -}}
238
+ {%- else -%}
239
+ {{- item['text'] | trim -}}
240
+ {%- endif -%}
241
+ {%- elif item['type'] == 'image' -%}
242
+ {{- '\n\n<|image|>\n\n' -}}
243
+ {%- set ns.prev_message_type = 'image' -%}
244
+ {%- elif item['type'] == 'audio' -%}
245
+ {{- '<|audio|>' -}}
246
+ {%- set ns.prev_message_type = 'audio' -%}
247
+ {%- elif item['type'] == 'video' -%}
248
+ {{- '\n\n<|video|>\n\n' -}}
249
+ {%- set ns.prev_message_type = 'video' -%}
250
+ {%- endif -%}
251
+ {%- endfor -%}
252
+ {%- endif -%}
253
+
254
+ {%- if not (message['tool_responses'] and not message['content']) -%}
255
+ {{- '<turn|>\n' -}}
256
+ {%- endif -%}
257
+ {%- endfor -%}
258
+
259
+ {%- if add_generation_prompt -%}
260
+ {%- if ns.prev_message_type != 'tool_response' -%}
261
+ {{- '<|turn>model\n' -}}
262
+ {%- endif -%}
263
+ {%- if not enable_thinking | default(false) -%}
264
+ {{- '<|channel>thought\n<channel|>' -}}
265
+ {%- endif -%}
266
+ {%- endif -%}
checkpoint-242/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01cc0e44a43e58c8b45161465df8b4fadbd469fd35fab1d5c423759ee5ba8a68
3
+ size 38229709
checkpoint-242/processor_config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_ms_per_token": 40,
3
+ "audio_seq_length": 750,
4
+ "feature_extractor": {
5
+ "dither": 0.0,
6
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
7
+ "feature_size": 128,
8
+ "fft_length": 512,
9
+ "fft_overdrive": false,
10
+ "frame_length": 320,
11
+ "hop_length": 160,
12
+ "input_scale_factor": 1.0,
13
+ "max_frequency": 8000.0,
14
+ "mel_floor": 0.001,
15
+ "min_frequency": 0.0,
16
+ "padding_side": "right",
17
+ "padding_value": 0.0,
18
+ "per_bin_mean": null,
19
+ "per_bin_stddev": null,
20
+ "preemphasis": 0.0,
21
+ "preemphasis_htk_flavor": true,
22
+ "return_attention_mask": true,
23
+ "sampling_rate": 16000
24
+ },
25
+ "image_processor": {
26
+ "do_convert_rgb": true,
27
+ "do_normalize": false,
28
+ "do_rescale": true,
29
+ "do_resize": true,
30
+ "image_mean": [
31
+ 0.0,
32
+ 0.0,
33
+ 0.0
34
+ ],
35
+ "image_processor_type": "Gemma4ImageProcessor",
36
+ "image_seq_length": 280,
37
+ "image_std": [
38
+ 1.0,
39
+ 1.0,
40
+ 1.0
41
+ ],
42
+ "max_soft_tokens": 280,
43
+ "patch_size": 16,
44
+ "pooling_kernel_size": 3,
45
+ "resample": 3,
46
+ "rescale_factor": 0.00392156862745098
47
+ },
48
+ "image_seq_length": 280,
49
+ "processor_class": "Gemma4Processor",
50
+ "video_processor": {
51
+ "do_convert_rgb": true,
52
+ "do_normalize": true,
53
+ "do_rescale": true,
54
+ "do_resize": true,
55
+ "do_sample_frames": true,
56
+ "image_mean": [
57
+ 0.0,
58
+ 0.0,
59
+ 0.0
60
+ ],
61
+ "image_std": [
62
+ 1.0,
63
+ 1.0,
64
+ 1.0
65
+ ],
66
+ "max_soft_tokens": 70,
67
+ "num_frames": 32,
68
+ "patch_size": 16,
69
+ "pooling_kernel_size": 3,
70
+ "resample": 3,
71
+ "rescale_factor": 0.00392156862745098,
72
+ "return_metadata": false,
73
+ "video_processor_type": "Gemma4VideoProcessor"
74
+ }
75
+ }
checkpoint-242/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67a6c6b866e89a5917944b74173a0b8536ce4695e579297378c983b24e5a507b
3
+ size 14645
checkpoint-242/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7702a97a98d09da954174b71008538d77d6adeaeaeb17c9732ebeab8932e0e3e
3
+ size 1465
checkpoint-242/tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2619fe11b50dbed06ac443c51d757b354d0b62d64baa514404d4e84e6713519
3
+ size 32169780
checkpoint-242/tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [
17
+ "<|video|>"
18
+ ],
19
+ "image_token": "<|image|>",
20
+ "is_local": false,
21
+ "mask_token": "<mask>",
22
+ "model_max_length": 1000000000000000019884624838656,
23
+ "model_specific_special_tokens": {
24
+ "audio_token": "<|audio|>",
25
+ "boa_token": "<|audio>",
26
+ "boi_token": "<|image>",
27
+ "eoa_token": "<audio|>",
28
+ "eoc_token": "<channel|>",
29
+ "eoi_token": "<image|>",
30
+ "eot_token": "<turn|>",
31
+ "escape_token": "<|\"|>",
32
+ "etc_token": "<tool_call|>",
33
+ "etd_token": "<tool|>",
34
+ "etr_token": "<tool_response|>",
35
+ "image_token": "<|image|>",
36
+ "soc_token": "<|channel>",
37
+ "sot_token": "<|turn>",
38
+ "stc_token": "<|tool_call>",
39
+ "std_token": "<|tool>",
40
+ "str_token": "<|tool_response>",
41
+ "think_token": "<|think|>"
42
+ },
43
+ "pad_token": "<pad>",
44
+ "padding_side": "left",
45
+ "processor_class": "Gemma4Processor",
46
+ "response_schema": {
47
+ "properties": {
48
+ "content": {
49
+ "type": "string"
50
+ },
51
+ "role": {
52
+ "const": "assistant"
53
+ },
54
+ "thinking": {
55
+ "type": "string"
56
+ },
57
+ "tool_calls": {
58
+ "items": {
59
+ "properties": {
60
+ "function": {
61
+ "properties": {
62
+ "arguments": {
63
+ "additionalProperties": {},
64
+ "type": "object",
65
+ "x-parser": "gemma4-tool-call"
66
+ },
67
+ "name": {
68
+ "type": "string"
69
+ }
70
+ },
71
+ "type": "object",
72
+ "x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})"
73
+ },
74
+ "type": {
75
+ "const": "function"
76
+ }
77
+ },
78
+ "type": "object"
79
+ },
80
+ "type": "array",
81
+ "x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>"
82
+ }
83
+ },
84
+ "type": "object",
85
+ "x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
86
+ },
87
+ "soc_token": "<|channel>",
88
+ "sot_token": "<|turn>",
89
+ "stc_token": "<|tool_call>",
90
+ "std_token": "<|tool>",
91
+ "str_token": "<|tool_response>",
92
+ "think_token": "<|think|>",
93
+ "tokenizer_class": "GemmaTokenizer",
94
+ "unk_token": "<unk>"
95
+ }
checkpoint-242/trainer_state.json ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 242,
3
+ "best_metric": 0.6102388501167297,
4
+ "best_model_checkpoint": "/home/plucky/ml-workspace/models/gemma4-26b-securecode/checkpoint-242",
5
+ "epoch": 2.0,
6
+ "eval_steps": 500,
7
+ "global_step": 242,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 1.1113492242991925,
14
+ "epoch": 0.0827300930713547,
15
+ "grad_norm": 10.3125,
16
+ "learning_rate": 1.8e-05,
17
+ "loss": 93.48836059570313,
18
+ "mean_token_accuracy": 0.4107020549476147,
19
+ "num_tokens": 81920.0,
20
+ "step": 10
21
+ },
22
+ {
23
+ "entropy": 0.8875315530225635,
24
+ "epoch": 0.1654601861427094,
25
+ "grad_norm": 6.15625,
26
+ "learning_rate": 3.8e-05,
27
+ "loss": 67.76697998046875,
28
+ "mean_token_accuracy": 0.5182974558323622,
29
+ "num_tokens": 163840.0,
30
+ "step": 20
31
+ },
32
+ {
33
+ "entropy": 0.673606987670064,
34
+ "epoch": 0.2481902792140641,
35
+ "grad_norm": 2.421875,
36
+ "learning_rate": 5.8e-05,
37
+ "loss": 37.221334838867186,
38
+ "mean_token_accuracy": 0.6476027386263012,
39
+ "num_tokens": 245760.0,
40
+ "step": 30
41
+ },
42
+ {
43
+ "entropy": 1.0845661748200655,
44
+ "epoch": 0.3309203722854188,
45
+ "grad_norm": 1.3671875,
46
+ "learning_rate": 7.800000000000001e-05,
47
+ "loss": 22.017848205566406,
48
+ "mean_token_accuracy": 0.7083170266821981,
49
+ "num_tokens": 327680.0,
50
+ "step": 40
51
+ },
52
+ {
53
+ "entropy": 1.1636322166770696,
54
+ "epoch": 0.4136504653567735,
55
+ "grad_norm": 0.703125,
56
+ "learning_rate": 9.8e-05,
57
+ "loss": 17.47879638671875,
58
+ "mean_token_accuracy": 0.7332558700814843,
59
+ "num_tokens": 409600.0,
60
+ "step": 50
61
+ },
62
+ {
63
+ "entropy": 0.9551631901413202,
64
+ "epoch": 0.4963805584281282,
65
+ "grad_norm": 0.40625,
66
+ "learning_rate": 0.000118,
67
+ "loss": 15.09481201171875,
68
+ "mean_token_accuracy": 0.7555772982537746,
69
+ "num_tokens": 491520.0,
70
+ "step": 60
71
+ },
72
+ {
73
+ "entropy": 0.8048430571332574,
74
+ "epoch": 0.5791106514994829,
75
+ "grad_norm": 0.375,
76
+ "learning_rate": 0.000138,
77
+ "loss": 13.297686767578124,
78
+ "mean_token_accuracy": 0.7774828754365444,
79
+ "num_tokens": 573440.0,
80
+ "step": 70
81
+ },
82
+ {
83
+ "entropy": 0.8100443260744215,
84
+ "epoch": 0.6618407445708376,
85
+ "grad_norm": 0.4609375,
86
+ "learning_rate": 0.00015800000000000002,
87
+ "loss": 12.752572631835937,
88
+ "mean_token_accuracy": 0.7837084107100963,
89
+ "num_tokens": 655360.0,
90
+ "step": 80
91
+ },
92
+ {
93
+ "entropy": 0.7172152267768979,
94
+ "epoch": 0.7445708376421923,
95
+ "grad_norm": 2.1875,
96
+ "learning_rate": 0.00017800000000000002,
97
+ "loss": 11.629959106445312,
98
+ "mean_token_accuracy": 0.799449609220028,
99
+ "num_tokens": 737280.0,
100
+ "step": 90
101
+ },
102
+ {
103
+ "entropy": 0.7284062243998051,
104
+ "epoch": 0.827300930713547,
105
+ "grad_norm": 0.40625,
106
+ "learning_rate": 0.00019800000000000002,
107
+ "loss": 11.506278991699219,
108
+ "mean_token_accuracy": 0.8022871781140566,
109
+ "num_tokens": 819200.0,
110
+ "step": 100
111
+ },
112
+ {
113
+ "entropy": 0.6922262106090784,
114
+ "epoch": 0.9100310237849017,
115
+ "grad_norm": 0.341796875,
116
+ "learning_rate": 0.00019942266891397815,
117
+ "loss": 11.149666595458985,
118
+ "mean_token_accuracy": 0.8068982377648354,
119
+ "num_tokens": 901120.0,
120
+ "step": 110
121
+ },
122
+ {
123
+ "entropy": 0.6608987387269736,
124
+ "epoch": 0.9927611168562565,
125
+ "grad_norm": 0.373046875,
126
+ "learning_rate": 0.00019743551343638324,
127
+ "loss": 10.666960906982421,
128
+ "mean_token_accuracy": 0.8124388422816992,
129
+ "num_tokens": 983040.0,
130
+ "step": 120
131
+ },
132
+ {
133
+ "epoch": 1.0,
134
+ "eval_entropy": 0.6862195637336997,
135
+ "eval_loss": 0.6695265769958496,
136
+ "eval_mean_token_accuracy": 0.8135074851124786,
137
+ "eval_num_tokens": 990208.0,
138
+ "eval_runtime": 255.0413,
139
+ "eval_samples_per_second": 0.843,
140
+ "eval_steps_per_second": 0.843,
141
+ "step": 121
142
+ },
143
+ {
144
+ "entropy": 0.6788679953617386,
145
+ "epoch": 1.0744570837642193,
146
+ "grad_norm": 0.3984375,
147
+ "learning_rate": 0.00019405971991583108,
148
+ "loss": 10.533837127685548,
149
+ "mean_token_accuracy": 0.8129133717923225,
150
+ "num_tokens": 1063936.0,
151
+ "step": 130
152
+ },
153
+ {
154
+ "entropy": 0.5800832805223763,
155
+ "epoch": 1.157187176835574,
156
+ "grad_norm": 0.333984375,
157
+ "learning_rate": 0.00018934339971482674,
158
+ "loss": 9.498150634765626,
159
+ "mean_token_accuracy": 0.8281555753201246,
160
+ "num_tokens": 1145856.0,
161
+ "step": 140
162
+ },
163
+ {
164
+ "entropy": 0.6344770405441522,
165
+ "epoch": 1.2399172699069285,
166
+ "grad_norm": 0.388671875,
167
+ "learning_rate": 0.00018335376920472097,
168
+ "loss": 10.217367553710938,
169
+ "mean_token_accuracy": 0.8195327781140804,
170
+ "num_tokens": 1227776.0,
171
+ "step": 150
172
+ },
173
+ {
174
+ "entropy": 0.6310219537466765,
175
+ "epoch": 1.3226473629782833,
176
+ "grad_norm": 0.380859375,
177
+ "learning_rate": 0.00017617619180688085,
178
+ "loss": 10.081737518310547,
179
+ "mean_token_accuracy": 0.8219178050756455,
180
+ "num_tokens": 1309696.0,
181
+ "step": 160
182
+ },
183
+ {
184
+ "entropy": 0.5863334746100008,
185
+ "epoch": 1.4053774560496382,
186
+ "grad_norm": 0.341796875,
187
+ "learning_rate": 0.00016791296140450545,
188
+ "loss": 9.392319488525391,
189
+ "mean_token_accuracy": 0.8319227002561093,
190
+ "num_tokens": 1391616.0,
191
+ "step": 170
192
+ },
193
+ {
194
+ "entropy": 0.6232900662347675,
195
+ "epoch": 1.4881075491209927,
196
+ "grad_norm": 0.44921875,
197
+ "learning_rate": 0.0001586818444637402,
198
+ "loss": 10.051438140869141,
199
+ "mean_token_accuracy": 0.8215264175087214,
200
+ "num_tokens": 1473536.0,
201
+ "step": 180
202
+ },
203
+ {
204
+ "entropy": 0.6163463215343654,
205
+ "epoch": 1.5708376421923473,
206
+ "grad_norm": 0.384765625,
207
+ "learning_rate": 0.0001486144016415862,
208
+ "loss": 9.878226470947265,
209
+ "mean_token_accuracy": 0.8220768082886935,
210
+ "num_tokens": 1555456.0,
211
+ "step": 190
212
+ },
213
+ {
214
+ "entropy": 0.588023800123483,
215
+ "epoch": 1.6535677352637022,
216
+ "grad_norm": 0.3515625,
217
+ "learning_rate": 0.00013785411280082746,
218
+ "loss": 9.45407943725586,
219
+ "mean_token_accuracy": 0.8305283710360527,
220
+ "num_tokens": 1637376.0,
221
+ "step": 200
222
+ },
223
+ {
224
+ "entropy": 0.599842881783843,
225
+ "epoch": 1.736297828335057,
226
+ "grad_norm": 0.37890625,
227
+ "learning_rate": 0.00012655433215401438,
228
+ "loss": 9.548422241210938,
229
+ "mean_token_accuracy": 0.8284735765308142,
230
+ "num_tokens": 1719296.0,
231
+ "step": 210
232
+ },
233
+ {
234
+ "entropy": 0.6552030782215297,
235
+ "epoch": 1.8190279214064116,
236
+ "grad_norm": 0.361328125,
237
+ "learning_rate": 0.00011487610267952142,
238
+ "loss": 10.46890640258789,
239
+ "mean_token_accuracy": 0.8134295467287302,
240
+ "num_tokens": 1801216.0,
241
+ "step": 220
242
+ },
243
+ {
244
+ "entropy": 0.5984975789207965,
245
+ "epoch": 1.9017580144777662,
246
+ "grad_norm": 0.353515625,
247
+ "learning_rate": 0.00010298586095833151,
248
+ "loss": 9.603475952148438,
249
+ "mean_token_accuracy": 0.827079250663519,
250
+ "num_tokens": 1883136.0,
251
+ "step": 230
252
+ },
253
+ {
254
+ "entropy": 0.5947112645488233,
255
+ "epoch": 1.984488107549121,
256
+ "grad_norm": 0.64453125,
257
+ "learning_rate": 9.10530651419099e-05,
258
+ "loss": 9.561953735351562,
259
+ "mean_token_accuracy": 0.8265655554831028,
260
+ "num_tokens": 1965056.0,
261
+ "step": 240
262
+ },
263
+ {
264
+ "epoch": 2.0,
265
+ "eval_entropy": 0.6100467269503793,
266
+ "eval_loss": 0.6102388501167297,
267
+ "eval_mean_token_accuracy": 0.8254676164582718,
268
+ "eval_num_tokens": 1980416.0,
269
+ "eval_runtime": 254.828,
270
+ "eval_samples_per_second": 0.844,
271
+ "eval_steps_per_second": 0.844,
272
+ "step": 242
273
+ }
274
+ ],
275
+ "logging_steps": 10,
276
+ "max_steps": 363,
277
+ "num_input_tokens_seen": 0,
278
+ "num_train_epochs": 3,
279
+ "save_steps": 500,
280
+ "stateful_callbacks": {
281
+ "TrainerControl": {
282
+ "args": {
283
+ "should_epoch_stop": false,
284
+ "should_evaluate": false,
285
+ "should_log": false,
286
+ "should_save": true,
287
+ "should_training_stop": false
288
+ },
289
+ "attributes": {}
290
+ }
291
+ },
292
+ "total_flos": 2.980881204394721e+17,
293
+ "train_batch_size": 1,
294
+ "trial_name": null,
295
+ "trial_params": null
296
+ }
checkpoint-242/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eaf4e1eba101412810b250e27914b2df87f93b0a9c62028451f50813e692b8e
3
+ size 5713
checkpoint-363/README.md ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ base_model: google/gemma-4-26b-a4b-it
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:google/gemma-4-26b-a4b-it
7
+ - lora
8
+ - sft
9
+ - transformers
10
+ - trl
11
+ ---
12
+
13
+ # Model Card for Model ID
14
+
15
+ <!-- Provide a quick summary of what the model is/does. -->
16
+
17
+
18
+
19
+ ## Model Details
20
+
21
+ ### Model Description
22
+
23
+ <!-- Provide a longer summary of what this model is. -->
24
+
25
+
26
+
27
+ - **Developed by:** [More Information Needed]
28
+ - **Funded by [optional]:** [More Information Needed]
29
+ - **Shared by [optional]:** [More Information Needed]
30
+ - **Model type:** [More Information Needed]
31
+ - **Language(s) (NLP):** [More Information Needed]
32
+ - **License:** [More Information Needed]
33
+ - **Finetuned from model [optional]:** [More Information Needed]
34
+
35
+ ### Model Sources [optional]
36
+
37
+ <!-- Provide the basic links for the model. -->
38
+
39
+ - **Repository:** [More Information Needed]
40
+ - **Paper [optional]:** [More Information Needed]
41
+ - **Demo [optional]:** [More Information Needed]
42
+
43
+ ## Uses
44
+
45
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
46
+
47
+ ### Direct Use
48
+
49
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
50
+
51
+ [More Information Needed]
52
+
53
+ ### Downstream Use [optional]
54
+
55
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
56
+
57
+ [More Information Needed]
58
+
59
+ ### Out-of-Scope Use
60
+
61
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
62
+
63
+ [More Information Needed]
64
+
65
+ ## Bias, Risks, and Limitations
66
+
67
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
68
+
69
+ [More Information Needed]
70
+
71
+ ### Recommendations
72
+
73
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
74
+
75
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
76
+
77
+ ## How to Get Started with the Model
78
+
79
+ Use the code below to get started with the model.
80
+
81
+ [More Information Needed]
82
+
83
+ ## Training Details
84
+
85
+ ### Training Data
86
+
87
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
88
+
89
+ [More Information Needed]
90
+
91
+ ### Training Procedure
92
+
93
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
94
+
95
+ #### Preprocessing [optional]
96
+
97
+ [More Information Needed]
98
+
99
+
100
+ #### Training Hyperparameters
101
+
102
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
103
+
104
+ #### Speeds, Sizes, Times [optional]
105
+
106
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
107
+
108
+ [More Information Needed]
109
+
110
+ ## Evaluation
111
+
112
+ <!-- This section describes the evaluation protocols and provides the results. -->
113
+
114
+ ### Testing Data, Factors & Metrics
115
+
116
+ #### Testing Data
117
+
118
+ <!-- This should link to a Dataset Card if possible. -->
119
+
120
+ [More Information Needed]
121
+
122
+ #### Factors
123
+
124
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
125
+
126
+ [More Information Needed]
127
+
128
+ #### Metrics
129
+
130
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
131
+
132
+ [More Information Needed]
133
+
134
+ ### Results
135
+
136
+ [More Information Needed]
137
+
138
+ #### Summary
139
+
140
+
141
+
142
+ ## Model Examination [optional]
143
+
144
+ <!-- Relevant interpretability work for the model goes here -->
145
+
146
+ [More Information Needed]
147
+
148
+ ## Environmental Impact
149
+
150
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
151
+
152
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
153
+
154
+ - **Hardware Type:** [More Information Needed]
155
+ - **Hours used:** [More Information Needed]
156
+ - **Cloud Provider:** [More Information Needed]
157
+ - **Compute Region:** [More Information Needed]
158
+ - **Carbon Emitted:** [More Information Needed]
159
+
160
+ ## Technical Specifications [optional]
161
+
162
+ ### Model Architecture and Objective
163
+
164
+ [More Information Needed]
165
+
166
+ ### Compute Infrastructure
167
+
168
+ [More Information Needed]
169
+
170
+ #### Hardware
171
+
172
+ [More Information Needed]
173
+
174
+ #### Software
175
+
176
+ [More Information Needed]
177
+
178
+ ## Citation [optional]
179
+
180
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
181
+
182
+ **BibTeX:**
183
+
184
+ [More Information Needed]
185
+
186
+ **APA:**
187
+
188
+ [More Information Needed]
189
+
190
+ ## Glossary [optional]
191
+
192
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
193
+
194
+ [More Information Needed]
195
+
196
+ ## More Information [optional]
197
+
198
+ [More Information Needed]
199
+
200
+ ## Model Card Authors [optional]
201
+
202
+ [More Information Needed]
203
+
204
+ ## Model Card Contact
205
+
206
+ [More Information Needed]
207
+ ### Framework versions
208
+
209
+ - PEFT 0.18.2.dev0
checkpoint-363/adapter_config.json ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alora_invocation_tokens": null,
3
+ "alpha_pattern": {},
4
+ "arrow_config": null,
5
+ "auto_mapping": null,
6
+ "base_model_name_or_path": "google/gemma-4-26b-a4b-it",
7
+ "bias": "none",
8
+ "corda_config": null,
9
+ "ensure_weight_tying": false,
10
+ "eva_config": null,
11
+ "exclude_modules": null,
12
+ "fan_in_fan_out": false,
13
+ "inference_mode": true,
14
+ "init_lora_weights": true,
15
+ "layer_replication": null,
16
+ "layers_pattern": null,
17
+ "layers_to_transform": null,
18
+ "loftq_config": {},
19
+ "lora_alpha": 32,
20
+ "lora_bias": false,
21
+ "lora_dropout": 0.05,
22
+ "lora_ga_config": null,
23
+ "megatron_config": null,
24
+ "megatron_core": "megatron.core",
25
+ "modules_to_save": null,
26
+ "peft_type": "LORA",
27
+ "peft_version": "0.18.2.dev0@e7355a3b2233820f6f30e558ce133ed22673a087",
28
+ "qalora_group_size": 16,
29
+ "r": 16,
30
+ "rank_pattern": {},
31
+ "revision": null,
32
+ "target_modules": [
33
+ "model.language_model.layers.4.self_attn.k_proj",
34
+ "model.language_model.layers.17.self_attn.o_proj",
35
+ "model.language_model.layers.3.mlp.up_proj",
36
+ "model.language_model.layers.17.mlp.up_proj",
37
+ "model.language_model.layers.8.mlp.down_proj",
38
+ "model.language_model.layers.27.self_attn.k_proj",
39
+ "model.language_model.layers.28.mlp.down_proj",
40
+ "model.language_model.layers.6.mlp.up_proj",
41
+ "model.language_model.layers.24.self_attn.k_proj",
42
+ "model.language_model.layers.6.self_attn.q_proj",
43
+ "model.language_model.layers.17.self_attn.q_proj",
44
+ "model.language_model.layers.15.self_attn.k_proj",
45
+ "model.language_model.layers.24.mlp.up_proj",
46
+ "model.language_model.layers.19.mlp.gate_proj",
47
+ "model.language_model.layers.16.self_attn.k_proj",
48
+ "model.language_model.layers.26.self_attn.q_proj",
49
+ "model.language_model.layers.21.mlp.up_proj",
50
+ "model.language_model.layers.17.mlp.down_proj",
51
+ "model.language_model.layers.10.self_attn.v_proj",
52
+ "model.language_model.layers.25.mlp.down_proj",
53
+ "model.language_model.layers.11.mlp.up_proj",
54
+ "model.language_model.layers.2.self_attn.o_proj",
55
+ "model.language_model.layers.15.mlp.down_proj",
56
+ "model.language_model.layers.10.self_attn.k_proj",
57
+ "model.language_model.layers.15.self_attn.q_proj",
58
+ "model.language_model.layers.9.self_attn.v_proj",
59
+ "model.language_model.layers.27.self_attn.o_proj",
60
+ "model.language_model.layers.3.self_attn.v_proj",
61
+ "model.language_model.layers.10.self_attn.q_proj",
62
+ "model.language_model.layers.21.mlp.gate_proj",
63
+ "model.language_model.layers.25.self_attn.q_proj",
64
+ "model.language_model.layers.5.self_attn.o_proj",
65
+ "model.language_model.layers.2.mlp.gate_proj",
66
+ "model.language_model.layers.9.mlp.gate_proj",
67
+ "model.language_model.layers.19.self_attn.v_proj",
68
+ "model.language_model.layers.18.self_attn.k_proj",
69
+ "model.language_model.layers.19.mlp.down_proj",
70
+ "model.language_model.layers.23.self_attn.o_proj",
71
+ "model.language_model.layers.27.mlp.gate_proj",
72
+ "model.language_model.layers.0.mlp.up_proj",
73
+ "model.language_model.layers.20.mlp.gate_proj",
74
+ "model.language_model.layers.28.self_attn.o_proj",
75
+ "model.language_model.layers.4.self_attn.o_proj",
76
+ "model.language_model.layers.28.self_attn.v_proj",
77
+ "model.language_model.layers.11.self_attn.q_proj",
78
+ "model.language_model.layers.26.self_attn.o_proj",
79
+ "model.language_model.layers.9.mlp.down_proj",
80
+ "model.language_model.layers.27.self_attn.v_proj",
81
+ "model.language_model.layers.23.mlp.up_proj",
82
+ "model.language_model.layers.2.mlp.up_proj",
83
+ "model.language_model.layers.0.mlp.gate_proj",
84
+ "model.language_model.layers.18.self_attn.o_proj",
85
+ "model.language_model.layers.19.self_attn.k_proj",
86
+ "model.language_model.layers.10.mlp.down_proj",
87
+ "model.language_model.layers.10.mlp.gate_proj",
88
+ "model.language_model.layers.0.self_attn.o_proj",
89
+ "model.language_model.layers.20.mlp.down_proj",
90
+ "model.language_model.layers.10.self_attn.o_proj",
91
+ "model.language_model.layers.15.self_attn.o_proj",
92
+ "model.language_model.layers.18.mlp.down_proj",
93
+ "model.language_model.layers.1.self_attn.v_proj",
94
+ "model.language_model.layers.13.self_attn.q_proj",
95
+ "model.language_model.layers.18.self_attn.q_proj",
96
+ "model.language_model.layers.3.mlp.down_proj",
97
+ "model.language_model.layers.20.self_attn.k_proj",
98
+ "model.language_model.layers.14.self_attn.o_proj",
99
+ "model.language_model.layers.7.mlp.down_proj",
100
+ "model.language_model.layers.25.self_attn.v_proj",
101
+ "model.language_model.layers.29.mlp.gate_proj",
102
+ "model.language_model.layers.2.self_attn.k_proj",
103
+ "model.language_model.layers.5.self_attn.k_proj",
104
+ "model.language_model.layers.9.self_attn.k_proj",
105
+ "model.language_model.layers.1.mlp.gate_proj",
106
+ "model.language_model.layers.8.self_attn.o_proj",
107
+ "model.language_model.layers.22.self_attn.k_proj",
108
+ "model.language_model.layers.3.self_attn.q_proj",
109
+ "model.language_model.layers.23.self_attn.k_proj",
110
+ "model.language_model.layers.3.self_attn.k_proj",
111
+ "model.language_model.layers.19.self_attn.q_proj",
112
+ "model.language_model.layers.18.self_attn.v_proj",
113
+ "model.language_model.layers.10.mlp.up_proj",
114
+ "model.language_model.layers.11.mlp.gate_proj",
115
+ "model.language_model.layers.1.mlp.up_proj",
116
+ "model.language_model.layers.18.mlp.gate_proj",
117
+ "model.language_model.layers.8.mlp.gate_proj",
118
+ "model.language_model.layers.7.mlp.gate_proj",
119
+ "model.language_model.layers.8.mlp.up_proj",
120
+ "model.language_model.layers.5.self_attn.q_proj",
121
+ "model.language_model.layers.14.self_attn.k_proj",
122
+ "model.language_model.layers.22.self_attn.q_proj",
123
+ "model.language_model.layers.4.mlp.down_proj",
124
+ "model.language_model.layers.22.mlp.gate_proj",
125
+ "model.language_model.layers.15.self_attn.v_proj",
126
+ "model.language_model.layers.21.self_attn.o_proj",
127
+ "model.language_model.layers.11.self_attn.o_proj",
128
+ "model.language_model.layers.20.mlp.up_proj",
129
+ "model.language_model.layers.16.self_attn.q_proj",
130
+ "model.language_model.layers.1.self_attn.k_proj",
131
+ "model.language_model.layers.24.mlp.gate_proj",
132
+ "model.language_model.layers.26.mlp.gate_proj",
133
+ "model.language_model.layers.2.self_attn.q_proj",
134
+ "model.language_model.layers.4.mlp.gate_proj",
135
+ "model.language_model.layers.7.self_attn.q_proj",
136
+ "model.language_model.layers.14.self_attn.v_proj",
137
+ "model.language_model.layers.27.self_attn.q_proj",
138
+ "model.language_model.layers.29.mlp.up_proj",
139
+ "model.language_model.layers.28.self_attn.k_proj",
140
+ "model.language_model.layers.24.self_attn.o_proj",
141
+ "model.language_model.layers.26.self_attn.k_proj",
142
+ "model.language_model.layers.21.mlp.down_proj",
143
+ "model.language_model.layers.14.mlp.gate_proj",
144
+ "model.language_model.layers.25.mlp.up_proj",
145
+ "model.language_model.layers.27.mlp.down_proj",
146
+ "model.language_model.layers.20.self_attn.v_proj",
147
+ "model.language_model.layers.0.mlp.down_proj",
148
+ "model.language_model.layers.6.self_attn.v_proj",
149
+ "model.language_model.layers.4.self_attn.q_proj",
150
+ "model.language_model.layers.9.self_attn.q_proj",
151
+ "model.language_model.layers.0.self_attn.q_proj",
152
+ "model.language_model.layers.27.mlp.up_proj",
153
+ "model.language_model.layers.29.self_attn.k_proj",
154
+ "model.language_model.layers.29.self_attn.q_proj",
155
+ "model.language_model.layers.12.mlp.up_proj",
156
+ "model.language_model.layers.6.mlp.down_proj",
157
+ "model.language_model.layers.2.mlp.down_proj",
158
+ "model.language_model.layers.6.mlp.gate_proj",
159
+ "model.language_model.layers.24.self_attn.v_proj",
160
+ "model.language_model.layers.4.mlp.up_proj",
161
+ "model.language_model.layers.9.self_attn.o_proj",
162
+ "model.language_model.layers.22.self_attn.v_proj",
163
+ "model.language_model.layers.23.mlp.gate_proj",
164
+ "model.language_model.layers.5.mlp.down_proj",
165
+ "model.language_model.layers.13.self_attn.o_proj",
166
+ "model.language_model.layers.14.mlp.up_proj",
167
+ "model.language_model.layers.15.mlp.gate_proj",
168
+ "model.language_model.layers.19.self_attn.o_proj",
169
+ "model.language_model.layers.24.mlp.down_proj",
170
+ "model.language_model.layers.21.self_attn.q_proj",
171
+ "model.language_model.layers.15.mlp.up_proj",
172
+ "model.language_model.layers.26.mlp.up_proj",
173
+ "model.language_model.layers.26.mlp.down_proj",
174
+ "model.language_model.layers.25.self_attn.o_proj",
175
+ "model.language_model.layers.8.self_attn.v_proj",
176
+ "model.language_model.layers.12.self_attn.o_proj",
177
+ "model.language_model.layers.6.self_attn.k_proj",
178
+ "model.language_model.layers.17.mlp.gate_proj",
179
+ "model.language_model.layers.12.self_attn.k_proj",
180
+ "model.language_model.layers.13.mlp.down_proj",
181
+ "model.language_model.layers.1.mlp.down_proj",
182
+ "model.language_model.layers.3.mlp.gate_proj",
183
+ "model.language_model.layers.14.mlp.down_proj",
184
+ "model.language_model.layers.9.mlp.up_proj",
185
+ "model.language_model.layers.21.self_attn.k_proj",
186
+ "model.language_model.layers.6.self_attn.o_proj",
187
+ "model.language_model.layers.0.self_attn.v_proj",
188
+ "model.language_model.layers.16.mlp.down_proj",
189
+ "model.language_model.layers.8.self_attn.k_proj",
190
+ "model.language_model.layers.12.mlp.gate_proj",
191
+ "model.language_model.layers.7.self_attn.o_proj",
192
+ "model.language_model.layers.18.mlp.up_proj",
193
+ "model.language_model.layers.13.mlp.up_proj",
194
+ "model.language_model.layers.16.mlp.up_proj",
195
+ "model.language_model.layers.17.self_attn.k_proj",
196
+ "model.language_model.layers.25.self_attn.k_proj",
197
+ "model.language_model.layers.8.self_attn.q_proj",
198
+ "model.language_model.layers.4.self_attn.v_proj",
199
+ "model.language_model.layers.23.self_attn.q_proj",
200
+ "model.language_model.layers.1.self_attn.o_proj",
201
+ "model.language_model.layers.5.mlp.up_proj",
202
+ "model.language_model.layers.13.self_attn.k_proj",
203
+ "model.language_model.layers.7.self_attn.k_proj",
204
+ "model.language_model.layers.22.self_attn.o_proj",
205
+ "model.language_model.layers.22.mlp.up_proj",
206
+ "model.language_model.layers.16.self_attn.o_proj",
207
+ "model.language_model.layers.24.self_attn.q_proj",
208
+ "model.language_model.layers.12.self_attn.q_proj",
209
+ "model.language_model.layers.2.self_attn.v_proj",
210
+ "model.language_model.layers.12.self_attn.v_proj",
211
+ "model.language_model.layers.13.mlp.gate_proj",
212
+ "model.language_model.layers.12.mlp.down_proj",
213
+ "model.language_model.layers.14.self_attn.q_proj",
214
+ "model.language_model.layers.26.self_attn.v_proj",
215
+ "model.language_model.layers.28.mlp.up_proj",
216
+ "model.language_model.layers.19.mlp.up_proj",
217
+ "model.language_model.layers.16.mlp.gate_proj",
218
+ "model.language_model.layers.7.self_attn.v_proj",
219
+ "model.language_model.layers.25.mlp.gate_proj",
220
+ "model.language_model.layers.13.self_attn.v_proj",
221
+ "model.language_model.layers.20.self_attn.q_proj",
222
+ "model.language_model.layers.5.mlp.gate_proj",
223
+ "model.language_model.layers.1.self_attn.q_proj",
224
+ "model.language_model.layers.11.mlp.down_proj",
225
+ "model.language_model.layers.0.self_attn.k_proj",
226
+ "model.language_model.layers.21.self_attn.v_proj",
227
+ "model.language_model.layers.28.self_attn.q_proj",
228
+ "model.language_model.layers.29.self_attn.o_proj",
229
+ "model.language_model.layers.11.self_attn.k_proj",
230
+ "model.language_model.layers.29.mlp.down_proj",
231
+ "model.language_model.layers.7.mlp.up_proj",
232
+ "model.language_model.layers.22.mlp.down_proj",
233
+ "model.language_model.layers.20.self_attn.o_proj",
234
+ "model.language_model.layers.3.self_attn.o_proj",
235
+ "model.language_model.layers.23.mlp.down_proj",
236
+ "model.language_model.layers.16.self_attn.v_proj",
237
+ "model.language_model.layers.28.mlp.gate_proj"
238
+ ],
239
+ "target_parameters": null,
240
+ "task_type": "CAUSAL_LM",
241
+ "trainable_token_indices": null,
242
+ "use_bdlora": null,
243
+ "use_dora": false,
244
+ "use_qalora": false,
245
+ "use_rslora": false
246
+ }
checkpoint-363/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fc9c87e6f1fd7f94d16281ee425e6587460ef9e8104aac93d25e0de6b7b31a9
3
+ size 37232104
checkpoint-363/chat_template.jinja ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- macro format_parameters(properties, required) -%}
2
+ {%- set standard_keys = ['description', 'type', 'properties', 'required', 'nullable'] -%}
3
+ {%- set ns = namespace(found_first=false) -%}
4
+ {%- for key, value in properties | dictsort -%}
5
+ {%- set add_comma = false -%}
6
+ {%- if key not in standard_keys -%}
7
+ {%- if ns.found_first %},{% endif -%}
8
+ {%- set ns.found_first = true -%}
9
+ {{ key }}:{
10
+ {%- if value['description'] -%}
11
+ description:<|"|>{{ value['description'] }}<|"|>
12
+ {%- set add_comma = true -%}
13
+ {%- endif -%}
14
+ {%- if value['nullable'] %}
15
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
16
+ nullable:true
17
+ {%- endif -%}
18
+ {%- if value['type'] | upper == 'STRING' -%}
19
+ {%- if value['enum'] -%}
20
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
21
+ enum:{{ format_argument(value['enum']) }}
22
+ {%- endif -%}
23
+ {%- elif value['type'] | upper == 'OBJECT' -%}
24
+ ,properties:{
25
+ {%- if value['properties'] is defined and value['properties'] is mapping -%}
26
+ {{- format_parameters(value['properties'], value['required'] | default([])) -}}
27
+ {%- elif value is mapping -%}
28
+ {{- format_parameters(value, value['required'] | default([])) -}}
29
+ {%- endif -%}
30
+ }
31
+ {%- if value['required'] -%}
32
+ ,required:[
33
+ {%- for item in value['required'] | default([]) -%}
34
+ <|"|>{{- item -}}<|"|>
35
+ {%- if not loop.last %},{% endif -%}
36
+ {%- endfor -%}
37
+ ]
38
+ {%- endif -%}
39
+ {%- elif value['type'] | upper == 'ARRAY' -%}
40
+ {%- if value['items'] is mapping and value['items'] -%}
41
+ ,items:{
42
+ {%- set ns_items = namespace(found_first=false) -%}
43
+ {%- for item_key, item_value in value['items'] | dictsort -%}
44
+ {%- if item_value is not none -%}
45
+ {%- if ns_items.found_first %},{% endif -%}
46
+ {%- set ns_items.found_first = true -%}
47
+ {%- if item_key == 'properties' -%}
48
+ properties:{
49
+ {%- if item_value is mapping -%}
50
+ {{- format_parameters(item_value, value['items']['required'] | default([])) -}}
51
+ {%- endif -%}
52
+ }
53
+ {%- elif item_key == 'required' -%}
54
+ required:[
55
+ {%- for req_item in item_value -%}
56
+ <|"|>{{- req_item -}}<|"|>
57
+ {%- if not loop.last %},{% endif -%}
58
+ {%- endfor -%}
59
+ ]
60
+ {%- elif item_key == 'type' -%}
61
+ {%- if item_value is string -%}
62
+ type:{{ format_argument(item_value | upper) }}
63
+ {%- else -%}
64
+ type:{{ format_argument(item_value | map('upper') | list) }}
65
+ {%- endif -%}
66
+ {%- else -%}
67
+ {{ item_key }}:{{ format_argument(item_value) }}
68
+ {%- endif -%}
69
+ {%- endif -%}
70
+ {%- endfor -%}
71
+ }
72
+ {%- endif -%}
73
+ {%- endif -%}
74
+ {%- if add_comma %},{%- else -%} {%- set add_comma = true -%} {% endif -%}
75
+ type:<|"|>{{ value['type'] | upper }}<|"|>}
76
+ {%- endif -%}
77
+ {%- endfor -%}
78
+ {%- endmacro -%}
79
+ {%- macro format_function_declaration(tool_data) -%}
80
+ declaration:{{- tool_data['function']['name'] -}}{description:<|"|>{{- tool_data['function']['description'] -}}<|"|>
81
+ {%- set params = tool_data['function']['parameters'] -%}
82
+ {%- if params -%}
83
+ ,parameters:{
84
+ {%- if params['properties'] -%}
85
+ properties:{ {{- format_parameters(params['properties'], params['required']) -}} },
86
+ {%- endif -%}
87
+ {%- if params['required'] -%}
88
+ required:[
89
+ {%- for item in params['required'] -%}
90
+ <|"|>{{- item -}}<|"|>
91
+ {{- ',' if not loop.last -}}
92
+ {%- endfor -%}
93
+ ],
94
+ {%- endif -%}
95
+ {%- if params['type'] -%}
96
+ type:<|"|>{{- params['type'] | upper -}}<|"|>}
97
+ {%- endif -%}
98
+ {%- endif -%}
99
+ {%- if 'response' in tool_data['function'] -%}
100
+ {%- set response_declaration = tool_data['function']['response'] -%}
101
+ ,response:{
102
+ {%- if response_declaration['description'] -%}
103
+ description:<|"|>{{- response_declaration['description'] -}}<|"|>,
104
+ {%- endif -%}
105
+ {%- if response_declaration['type'] | upper == 'OBJECT' -%}
106
+ type:<|"|>{{- response_declaration['type'] | upper -}}<|"|>}
107
+ {%- endif -%}
108
+ {%- endif -%}
109
+ }
110
+ {%- endmacro -%}
111
+ {%- macro format_argument(argument, escape_keys=True) -%}
112
+ {%- if argument is string -%}
113
+ {{- '<|"|>' + argument + '<|"|>' -}}
114
+ {%- elif argument is boolean -%}
115
+ {{- 'true' if argument else 'false' -}}
116
+ {%- elif argument is mapping -%}
117
+ {{- '{' -}}
118
+ {%- set ns = namespace(found_first=false) -%}
119
+ {%- for key, value in argument | dictsort -%}
120
+ {%- if ns.found_first %},{% endif -%}
121
+ {%- set ns.found_first = true -%}
122
+ {%- if escape_keys -%}
123
+ {{- '<|"|>' + key + '<|"|>' -}}
124
+ {%- else -%}
125
+ {{- key -}}
126
+ {%- endif -%}
127
+ :{{- format_argument(value, escape_keys=escape_keys) -}}
128
+ {%- endfor -%}
129
+ {{- '}' -}}
130
+ {%- elif argument is sequence -%}
131
+ {{- '[' -}}
132
+ {%- for item in argument -%}
133
+ {{- format_argument(item, escape_keys=escape_keys) -}}
134
+ {%- if not loop.last %},{% endif -%}
135
+ {%- endfor -%}
136
+ {{- ']' -}}
137
+ {%- else -%}
138
+ {{- argument -}}
139
+ {%- endif -%}
140
+ {%- endmacro -%}
141
+ {%- macro strip_thinking(text) -%}
142
+ {%- set ns = namespace(result='') -%}
143
+ {%- for part in text.split('<channel|>') -%}
144
+ {%- if '<|channel>' in part -%}
145
+ {%- set ns.result = ns.result + part.split('<|channel>')[0] -%}
146
+ {%- else -%}
147
+ {%- set ns.result = ns.result + part -%}
148
+ {%- endif -%}
149
+ {%- endfor -%}
150
+ {{- ns.result | trim -}}
151
+ {%- endmacro -%}
152
+
153
+ {%- set ns = namespace(prev_message_type=None) -%}
154
+ {%- set loop_messages = messages -%}
155
+ {{ bos_token }}
156
+ {#- Handle System/Tool Definitions Block -#}
157
+ {%- if (enable_thinking is defined and enable_thinking) or tools or messages[0]['role'] in ['system', 'developer'] -%}
158
+ {{- '<|turn>system\n' -}}
159
+
160
+ {#- Inject Thinking token at the very top of the FIRST system turn -#}
161
+ {%- if enable_thinking is defined and enable_thinking -%}
162
+ {{- '<|think|>' -}}
163
+ {%- set ns.prev_message_type = 'think' -%}
164
+ {%- endif -%}
165
+
166
+ {%- if messages[0]['role'] in ['system', 'developer'] -%}
167
+ {{- messages[0]['content'] | trim -}}
168
+ {%- set loop_messages = messages[1:] -%}
169
+ {%- endif -%}
170
+
171
+ {%- if tools -%}
172
+ {%- for tool in tools %}
173
+ {{- '<|tool>' -}}
174
+ {{- format_function_declaration(tool) | trim -}}
175
+ {{- '<tool|>' -}}
176
+ {%- endfor %}
177
+ {%- set ns.prev_message_type = 'tool' -%}
178
+ {%- endif -%}
179
+
180
+ {{- '<turn|>\n' -}}
181
+ {%- endif %}
182
+
183
+ {#- Loop through messages -#}
184
+ {%- for message in loop_messages -%}
185
+ {%- set ns.prev_message_type = None -%}
186
+ {%- set role = 'model' if message['role'] == 'assistant' else message['role'] -%}
187
+ {{- '<|turn>' + role + '\n' }}
188
+
189
+ {%- if message['tool_calls'] -%}
190
+ {%- for tool_call in message['tool_calls'] -%}
191
+ {%- set function = tool_call['function'] -%}
192
+ {{- '<|tool_call>call:' + function['name'] + '{' -}}
193
+ {%- if function['arguments'] is mapping -%}
194
+ {%- set ns_args = namespace(found_first=false) -%}
195
+ {%- for key, value in function['arguments'] | dictsort -%}
196
+ {%- if ns_args.found_first %},{% endif -%}
197
+ {%- set ns_args.found_first = true -%}
198
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
199
+ {%- endfor -%}
200
+ {%- elif function['arguments'] is string -%}
201
+ {{- function['arguments'] -}}
202
+ {%- endif -%}
203
+ {{- '}<tool_call|>' -}}
204
+ {%- endfor -%}
205
+ {%- set ns.prev_message_type = 'tool_call' -%}
206
+ {%- endif -%}
207
+
208
+ {%- if message['tool_responses'] -%}
209
+ {#- Tool Response handling -#}
210
+ {%- for tool_response in message['tool_responses'] -%}
211
+ {{- '<|tool_response>' -}}
212
+ {%- if tool_response['response'] is mapping -%}
213
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{' -}}
214
+ {%- for key, value in tool_response['response'] | dictsort -%}
215
+ {{- key -}}:{{- format_argument(value, escape_keys=False) -}}
216
+ {%- if not loop.last %},{% endif -%}
217
+ {%- endfor -%}
218
+ {{- '}' -}}
219
+ {%- else -%}
220
+ {{- 'response:' + tool_response['name'] | default('unknown') + '{value:' + format_argument(tool_response['response'], escape_keys=False) + '}' -}}
221
+ {%- endif -%}
222
+ {{- '<tool_response|>' -}}
223
+ {%- endfor -%}
224
+ {%- set ns.prev_message_type = 'tool_response' -%}
225
+ {%- endif -%}
226
+
227
+ {%- if message['content'] is string -%}
228
+ {%- if role == 'model' -%}
229
+ {{- strip_thinking(message['content']) -}}
230
+ {%- else -%}
231
+ {{- message['content'] | trim -}}
232
+ {%- endif -%}
233
+ {%- elif message['content'] is sequence -%}
234
+ {%- for item in message['content'] -%}
235
+ {%- if item['type'] == 'text' -%}
236
+ {%- if role == 'model' -%}
237
+ {{- strip_thinking(item['text']) -}}
238
+ {%- else -%}
239
+ {{- item['text'] | trim -}}
240
+ {%- endif -%}
241
+ {%- elif item['type'] == 'image' -%}
242
+ {{- '\n\n<|image|>\n\n' -}}
243
+ {%- set ns.prev_message_type = 'image' -%}
244
+ {%- elif item['type'] == 'audio' -%}
245
+ {{- '<|audio|>' -}}
246
+ {%- set ns.prev_message_type = 'audio' -%}
247
+ {%- elif item['type'] == 'video' -%}
248
+ {{- '\n\n<|video|>\n\n' -}}
249
+ {%- set ns.prev_message_type = 'video' -%}
250
+ {%- endif -%}
251
+ {%- endfor -%}
252
+ {%- endif -%}
253
+
254
+ {%- if not (message['tool_responses'] and not message['content']) -%}
255
+ {{- '<turn|>\n' -}}
256
+ {%- endif -%}
257
+ {%- endfor -%}
258
+
259
+ {%- if add_generation_prompt -%}
260
+ {%- if ns.prev_message_type != 'tool_response' -%}
261
+ {{- '<|turn>model\n' -}}
262
+ {%- endif -%}
263
+ {%- if not enable_thinking | default(false) -%}
264
+ {{- '<|channel>thought\n<channel|>' -}}
265
+ {%- endif -%}
266
+ {%- endif -%}
checkpoint-363/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:704bd56fcfb881393baa83113d34a7cf3d7745744dc252c023ac9d7dee1ed1e8
3
+ size 38230093
checkpoint-363/processor_config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_ms_per_token": 40,
3
+ "audio_seq_length": 750,
4
+ "feature_extractor": {
5
+ "dither": 0.0,
6
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
7
+ "feature_size": 128,
8
+ "fft_length": 512,
9
+ "fft_overdrive": false,
10
+ "frame_length": 320,
11
+ "hop_length": 160,
12
+ "input_scale_factor": 1.0,
13
+ "max_frequency": 8000.0,
14
+ "mel_floor": 0.001,
15
+ "min_frequency": 0.0,
16
+ "padding_side": "right",
17
+ "padding_value": 0.0,
18
+ "per_bin_mean": null,
19
+ "per_bin_stddev": null,
20
+ "preemphasis": 0.0,
21
+ "preemphasis_htk_flavor": true,
22
+ "return_attention_mask": true,
23
+ "sampling_rate": 16000
24
+ },
25
+ "image_processor": {
26
+ "do_convert_rgb": true,
27
+ "do_normalize": false,
28
+ "do_rescale": true,
29
+ "do_resize": true,
30
+ "image_mean": [
31
+ 0.0,
32
+ 0.0,
33
+ 0.0
34
+ ],
35
+ "image_processor_type": "Gemma4ImageProcessor",
36
+ "image_seq_length": 280,
37
+ "image_std": [
38
+ 1.0,
39
+ 1.0,
40
+ 1.0
41
+ ],
42
+ "max_soft_tokens": 280,
43
+ "patch_size": 16,
44
+ "pooling_kernel_size": 3,
45
+ "resample": 3,
46
+ "rescale_factor": 0.00392156862745098
47
+ },
48
+ "image_seq_length": 280,
49
+ "processor_class": "Gemma4Processor",
50
+ "video_processor": {
51
+ "do_convert_rgb": true,
52
+ "do_normalize": true,
53
+ "do_rescale": true,
54
+ "do_resize": true,
55
+ "do_sample_frames": true,
56
+ "image_mean": [
57
+ 0.0,
58
+ 0.0,
59
+ 0.0
60
+ ],
61
+ "image_std": [
62
+ 1.0,
63
+ 1.0,
64
+ 1.0
65
+ ],
66
+ "max_soft_tokens": 70,
67
+ "num_frames": 32,
68
+ "patch_size": 16,
69
+ "pooling_kernel_size": 3,
70
+ "resample": 3,
71
+ "rescale_factor": 0.00392156862745098,
72
+ "return_metadata": false,
73
+ "video_processor_type": "Gemma4VideoProcessor"
74
+ }
75
+ }
checkpoint-363/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b277407ef7cf34b19b7f76b4063ef02c942ff37191ea6603533d3e5bb877696d
3
+ size 14645
checkpoint-363/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a27b1dfda3b0692906cbec10c46258cb4f99d28ec188b1d9b066e584a6708792
3
+ size 1465
checkpoint-363/tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2619fe11b50dbed06ac443c51d757b354d0b62d64baa514404d4e84e6713519
3
+ size 32169780
checkpoint-363/tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [
17
+ "<|video|>"
18
+ ],
19
+ "image_token": "<|image|>",
20
+ "is_local": false,
21
+ "mask_token": "<mask>",
22
+ "model_max_length": 1000000000000000019884624838656,
23
+ "model_specific_special_tokens": {
24
+ "audio_token": "<|audio|>",
25
+ "boa_token": "<|audio>",
26
+ "boi_token": "<|image>",
27
+ "eoa_token": "<audio|>",
28
+ "eoc_token": "<channel|>",
29
+ "eoi_token": "<image|>",
30
+ "eot_token": "<turn|>",
31
+ "escape_token": "<|\"|>",
32
+ "etc_token": "<tool_call|>",
33
+ "etd_token": "<tool|>",
34
+ "etr_token": "<tool_response|>",
35
+ "image_token": "<|image|>",
36
+ "soc_token": "<|channel>",
37
+ "sot_token": "<|turn>",
38
+ "stc_token": "<|tool_call>",
39
+ "std_token": "<|tool>",
40
+ "str_token": "<|tool_response>",
41
+ "think_token": "<|think|>"
42
+ },
43
+ "pad_token": "<pad>",
44
+ "padding_side": "left",
45
+ "processor_class": "Gemma4Processor",
46
+ "response_schema": {
47
+ "properties": {
48
+ "content": {
49
+ "type": "string"
50
+ },
51
+ "role": {
52
+ "const": "assistant"
53
+ },
54
+ "thinking": {
55
+ "type": "string"
56
+ },
57
+ "tool_calls": {
58
+ "items": {
59
+ "properties": {
60
+ "function": {
61
+ "properties": {
62
+ "arguments": {
63
+ "additionalProperties": {},
64
+ "type": "object",
65
+ "x-parser": "gemma4-tool-call"
66
+ },
67
+ "name": {
68
+ "type": "string"
69
+ }
70
+ },
71
+ "type": "object",
72
+ "x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})"
73
+ },
74
+ "type": {
75
+ "const": "function"
76
+ }
77
+ },
78
+ "type": "object"
79
+ },
80
+ "type": "array",
81
+ "x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>"
82
+ }
83
+ },
84
+ "type": "object",
85
+ "x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
86
+ },
87
+ "soc_token": "<|channel>",
88
+ "sot_token": "<|turn>",
89
+ "stc_token": "<|tool_call>",
90
+ "std_token": "<|tool>",
91
+ "str_token": "<|tool_response>",
92
+ "think_token": "<|think|>",
93
+ "tokenizer_class": "GemmaTokenizer",
94
+ "unk_token": "<unk>"
95
+ }
checkpoint-363/trainer_state.json ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": 363,
3
+ "best_metric": 0.6060348153114319,
4
+ "best_model_checkpoint": "/home/plucky/ml-workspace/models/gemma4-26b-securecode/checkpoint-363",
5
+ "epoch": 3.0,
6
+ "eval_steps": 500,
7
+ "global_step": 363,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "entropy": 1.1113492242991925,
14
+ "epoch": 0.0827300930713547,
15
+ "grad_norm": 10.3125,
16
+ "learning_rate": 1.8e-05,
17
+ "loss": 93.48836059570313,
18
+ "mean_token_accuracy": 0.4107020549476147,
19
+ "num_tokens": 81920.0,
20
+ "step": 10
21
+ },
22
+ {
23
+ "entropy": 0.8875315530225635,
24
+ "epoch": 0.1654601861427094,
25
+ "grad_norm": 6.15625,
26
+ "learning_rate": 3.8e-05,
27
+ "loss": 67.76697998046875,
28
+ "mean_token_accuracy": 0.5182974558323622,
29
+ "num_tokens": 163840.0,
30
+ "step": 20
31
+ },
32
+ {
33
+ "entropy": 0.673606987670064,
34
+ "epoch": 0.2481902792140641,
35
+ "grad_norm": 2.421875,
36
+ "learning_rate": 5.8e-05,
37
+ "loss": 37.221334838867186,
38
+ "mean_token_accuracy": 0.6476027386263012,
39
+ "num_tokens": 245760.0,
40
+ "step": 30
41
+ },
42
+ {
43
+ "entropy": 1.0845661748200655,
44
+ "epoch": 0.3309203722854188,
45
+ "grad_norm": 1.3671875,
46
+ "learning_rate": 7.800000000000001e-05,
47
+ "loss": 22.017848205566406,
48
+ "mean_token_accuracy": 0.7083170266821981,
49
+ "num_tokens": 327680.0,
50
+ "step": 40
51
+ },
52
+ {
53
+ "entropy": 1.1636322166770696,
54
+ "epoch": 0.4136504653567735,
55
+ "grad_norm": 0.703125,
56
+ "learning_rate": 9.8e-05,
57
+ "loss": 17.47879638671875,
58
+ "mean_token_accuracy": 0.7332558700814843,
59
+ "num_tokens": 409600.0,
60
+ "step": 50
61
+ },
62
+ {
63
+ "entropy": 0.9551631901413202,
64
+ "epoch": 0.4963805584281282,
65
+ "grad_norm": 0.40625,
66
+ "learning_rate": 0.000118,
67
+ "loss": 15.09481201171875,
68
+ "mean_token_accuracy": 0.7555772982537746,
69
+ "num_tokens": 491520.0,
70
+ "step": 60
71
+ },
72
+ {
73
+ "entropy": 0.8048430571332574,
74
+ "epoch": 0.5791106514994829,
75
+ "grad_norm": 0.375,
76
+ "learning_rate": 0.000138,
77
+ "loss": 13.297686767578124,
78
+ "mean_token_accuracy": 0.7774828754365444,
79
+ "num_tokens": 573440.0,
80
+ "step": 70
81
+ },
82
+ {
83
+ "entropy": 0.8100443260744215,
84
+ "epoch": 0.6618407445708376,
85
+ "grad_norm": 0.4609375,
86
+ "learning_rate": 0.00015800000000000002,
87
+ "loss": 12.752572631835937,
88
+ "mean_token_accuracy": 0.7837084107100963,
89
+ "num_tokens": 655360.0,
90
+ "step": 80
91
+ },
92
+ {
93
+ "entropy": 0.7172152267768979,
94
+ "epoch": 0.7445708376421923,
95
+ "grad_norm": 2.1875,
96
+ "learning_rate": 0.00017800000000000002,
97
+ "loss": 11.629959106445312,
98
+ "mean_token_accuracy": 0.799449609220028,
99
+ "num_tokens": 737280.0,
100
+ "step": 90
101
+ },
102
+ {
103
+ "entropy": 0.7284062243998051,
104
+ "epoch": 0.827300930713547,
105
+ "grad_norm": 0.40625,
106
+ "learning_rate": 0.00019800000000000002,
107
+ "loss": 11.506278991699219,
108
+ "mean_token_accuracy": 0.8022871781140566,
109
+ "num_tokens": 819200.0,
110
+ "step": 100
111
+ },
112
+ {
113
+ "entropy": 0.6922262106090784,
114
+ "epoch": 0.9100310237849017,
115
+ "grad_norm": 0.341796875,
116
+ "learning_rate": 0.00019942266891397815,
117
+ "loss": 11.149666595458985,
118
+ "mean_token_accuracy": 0.8068982377648354,
119
+ "num_tokens": 901120.0,
120
+ "step": 110
121
+ },
122
+ {
123
+ "entropy": 0.6608987387269736,
124
+ "epoch": 0.9927611168562565,
125
+ "grad_norm": 0.373046875,
126
+ "learning_rate": 0.00019743551343638324,
127
+ "loss": 10.666960906982421,
128
+ "mean_token_accuracy": 0.8124388422816992,
129
+ "num_tokens": 983040.0,
130
+ "step": 120
131
+ },
132
+ {
133
+ "epoch": 1.0,
134
+ "eval_entropy": 0.6862195637336997,
135
+ "eval_loss": 0.6695265769958496,
136
+ "eval_mean_token_accuracy": 0.8135074851124786,
137
+ "eval_num_tokens": 990208.0,
138
+ "eval_runtime": 255.0413,
139
+ "eval_samples_per_second": 0.843,
140
+ "eval_steps_per_second": 0.843,
141
+ "step": 121
142
+ },
143
+ {
144
+ "entropy": 0.6788679953617386,
145
+ "epoch": 1.0744570837642193,
146
+ "grad_norm": 0.3984375,
147
+ "learning_rate": 0.00019405971991583108,
148
+ "loss": 10.533837127685548,
149
+ "mean_token_accuracy": 0.8129133717923225,
150
+ "num_tokens": 1063936.0,
151
+ "step": 130
152
+ },
153
+ {
154
+ "entropy": 0.5800832805223763,
155
+ "epoch": 1.157187176835574,
156
+ "grad_norm": 0.333984375,
157
+ "learning_rate": 0.00018934339971482674,
158
+ "loss": 9.498150634765626,
159
+ "mean_token_accuracy": 0.8281555753201246,
160
+ "num_tokens": 1145856.0,
161
+ "step": 140
162
+ },
163
+ {
164
+ "entropy": 0.6344770405441522,
165
+ "epoch": 1.2399172699069285,
166
+ "grad_norm": 0.388671875,
167
+ "learning_rate": 0.00018335376920472097,
168
+ "loss": 10.217367553710938,
169
+ "mean_token_accuracy": 0.8195327781140804,
170
+ "num_tokens": 1227776.0,
171
+ "step": 150
172
+ },
173
+ {
174
+ "entropy": 0.6310219537466765,
175
+ "epoch": 1.3226473629782833,
176
+ "grad_norm": 0.380859375,
177
+ "learning_rate": 0.00017617619180688085,
178
+ "loss": 10.081737518310547,
179
+ "mean_token_accuracy": 0.8219178050756455,
180
+ "num_tokens": 1309696.0,
181
+ "step": 160
182
+ },
183
+ {
184
+ "entropy": 0.5863334746100008,
185
+ "epoch": 1.4053774560496382,
186
+ "grad_norm": 0.341796875,
187
+ "learning_rate": 0.00016791296140450545,
188
+ "loss": 9.392319488525391,
189
+ "mean_token_accuracy": 0.8319227002561093,
190
+ "num_tokens": 1391616.0,
191
+ "step": 170
192
+ },
193
+ {
194
+ "entropy": 0.6232900662347675,
195
+ "epoch": 1.4881075491209927,
196
+ "grad_norm": 0.44921875,
197
+ "learning_rate": 0.0001586818444637402,
198
+ "loss": 10.051438140869141,
199
+ "mean_token_accuracy": 0.8215264175087214,
200
+ "num_tokens": 1473536.0,
201
+ "step": 180
202
+ },
203
+ {
204
+ "entropy": 0.6163463215343654,
205
+ "epoch": 1.5708376421923473,
206
+ "grad_norm": 0.384765625,
207
+ "learning_rate": 0.0001486144016415862,
208
+ "loss": 9.878226470947265,
209
+ "mean_token_accuracy": 0.8220768082886935,
210
+ "num_tokens": 1555456.0,
211
+ "step": 190
212
+ },
213
+ {
214
+ "entropy": 0.588023800123483,
215
+ "epoch": 1.6535677352637022,
216
+ "grad_norm": 0.3515625,
217
+ "learning_rate": 0.00013785411280082746,
218
+ "loss": 9.45407943725586,
219
+ "mean_token_accuracy": 0.8305283710360527,
220
+ "num_tokens": 1637376.0,
221
+ "step": 200
222
+ },
223
+ {
224
+ "entropy": 0.599842881783843,
225
+ "epoch": 1.736297828335057,
226
+ "grad_norm": 0.37890625,
227
+ "learning_rate": 0.00012655433215401438,
228
+ "loss": 9.548422241210938,
229
+ "mean_token_accuracy": 0.8284735765308142,
230
+ "num_tokens": 1719296.0,
231
+ "step": 210
232
+ },
233
+ {
234
+ "entropy": 0.6552030782215297,
235
+ "epoch": 1.8190279214064116,
236
+ "grad_norm": 0.361328125,
237
+ "learning_rate": 0.00011487610267952142,
238
+ "loss": 10.46890640258789,
239
+ "mean_token_accuracy": 0.8134295467287302,
240
+ "num_tokens": 1801216.0,
241
+ "step": 220
242
+ },
243
+ {
244
+ "entropy": 0.5984975789207965,
245
+ "epoch": 1.9017580144777662,
246
+ "grad_norm": 0.353515625,
247
+ "learning_rate": 0.00010298586095833151,
248
+ "loss": 9.603475952148438,
249
+ "mean_token_accuracy": 0.827079250663519,
250
+ "num_tokens": 1883136.0,
251
+ "step": 230
252
+ },
253
+ {
254
+ "entropy": 0.5947112645488233,
255
+ "epoch": 1.984488107549121,
256
+ "grad_norm": 0.64453125,
257
+ "learning_rate": 9.10530651419099e-05,
258
+ "loss": 9.561953735351562,
259
+ "mean_token_accuracy": 0.8265655554831028,
260
+ "num_tokens": 1965056.0,
261
+ "step": 240
262
+ },
263
+ {
264
+ "epoch": 2.0,
265
+ "eval_entropy": 0.6100467269503793,
266
+ "eval_loss": 0.6102388501167297,
267
+ "eval_mean_token_accuracy": 0.8254676164582718,
268
+ "eval_num_tokens": 1980416.0,
269
+ "eval_runtime": 254.828,
270
+ "eval_samples_per_second": 0.844,
271
+ "eval_steps_per_second": 0.844,
272
+ "step": 242
273
+ },
274
+ {
275
+ "entropy": 0.5080371947511088,
276
+ "epoch": 2.066184074457084,
277
+ "grad_norm": 0.453125,
278
+ "learning_rate": 7.924777985705556e-05,
279
+ "loss": 8.056553649902344,
280
+ "mean_token_accuracy": 0.8497857213774814,
281
+ "num_tokens": 2045952.0,
282
+ "step": 250
283
+ },
284
+ {
285
+ "entropy": 0.5341692148707807,
286
+ "epoch": 2.1489141675284387,
287
+ "grad_norm": 0.384765625,
288
+ "learning_rate": 6.773825246734622e-05,
289
+ "loss": 8.356841278076171,
290
+ "mean_token_accuracy": 0.8431262206286192,
291
+ "num_tokens": 2127872.0,
292
+ "step": 260
293
+ },
294
+ {
295
+ "entropy": 0.5629857819527387,
296
+ "epoch": 2.231644260599793,
297
+ "grad_norm": 0.328125,
298
+ "learning_rate": 5.668851523397829e-05,
299
+ "loss": 9.067486572265626,
300
+ "mean_token_accuracy": 0.8315435405820608,
301
+ "num_tokens": 2209792.0,
302
+ "step": 270
303
+ },
304
+ {
305
+ "entropy": 0.5280973493587225,
306
+ "epoch": 2.314374353671148,
307
+ "grad_norm": 0.361328125,
308
+ "learning_rate": 4.625604754968839e-05,
309
+ "loss": 8.390058135986328,
310
+ "mean_token_accuracy": 0.8423923663794994,
311
+ "num_tokens": 2291712.0,
312
+ "step": 280
313
+ },
314
+ {
315
+ "entropy": 0.5421305931173265,
316
+ "epoch": 2.3971044467425027,
317
+ "grad_norm": 0.353515625,
318
+ "learning_rate": 3.658953156328857e-05,
319
+ "loss": 8.713886260986328,
320
+ "mean_token_accuracy": 0.8375489212572574,
321
+ "num_tokens": 2373632.0,
322
+ "step": 290
323
+ },
324
+ {
325
+ "entropy": 0.5257686520460993,
326
+ "epoch": 2.479834539813857,
327
+ "grad_norm": 0.373046875,
328
+ "learning_rate": 2.7826733181357932e-05,
329
+ "loss": 8.388682556152343,
330
+ "mean_token_accuracy": 0.8447284691035748,
331
+ "num_tokens": 2455552.0,
332
+ "step": 300
333
+ },
334
+ {
335
+ "entropy": 0.5735760541632772,
336
+ "epoch": 2.562564632885212,
337
+ "grad_norm": 0.421875,
338
+ "learning_rate": 2.0092538646774072e-05,
339
+ "loss": 9.259294891357422,
340
+ "mean_token_accuracy": 0.8287671197205781,
341
+ "num_tokens": 2537472.0,
342
+ "step": 310
343
+ },
344
+ {
345
+ "entropy": 0.5352369678206742,
346
+ "epoch": 2.6452947259565667,
347
+ "grad_norm": 0.369140625,
348
+ "learning_rate": 1.3497174676506674e-05,
349
+ "loss": 8.547685241699218,
350
+ "mean_token_accuracy": 0.8413160435855389,
351
+ "num_tokens": 2619392.0,
352
+ "step": 320
353
+ },
354
+ {
355
+ "entropy": 0.540962244477123,
356
+ "epoch": 2.7280248190279215,
357
+ "grad_norm": 0.365234375,
358
+ "learning_rate": 8.134637525034839e-06,
359
+ "loss": 8.591437530517577,
360
+ "mean_token_accuracy": 0.838882090896368,
361
+ "num_tokens": 2701312.0,
362
+ "step": 330
363
+ },
364
+ {
365
+ "entropy": 0.5567054254934192,
366
+ "epoch": 2.8107549120992763,
367
+ "grad_norm": 0.353515625,
368
+ "learning_rate": 4.081353362167406e-06,
369
+ "loss": 8.788534545898438,
370
+ "mean_token_accuracy": 0.8374510746449232,
371
+ "num_tokens": 2783232.0,
372
+ "step": 340
373
+ },
374
+ {
375
+ "entropy": 0.5575114467181266,
376
+ "epoch": 2.8934850051706307,
377
+ "grad_norm": 0.35546875,
378
+ "learning_rate": 1.3950890573852126e-06,
379
+ "loss": 8.935771179199218,
380
+ "mean_token_accuracy": 0.8345768079161644,
381
+ "num_tokens": 2865152.0,
382
+ "step": 350
383
+ },
384
+ {
385
+ "entropy": 0.5235911178402602,
386
+ "epoch": 2.9762150982419855,
387
+ "grad_norm": 0.36328125,
388
+ "learning_rate": 1.1412889406192673e-07,
389
+ "loss": 8.273484039306641,
390
+ "mean_token_accuracy": 0.8450831711292267,
391
+ "num_tokens": 2947072.0,
392
+ "step": 360
393
+ },
394
+ {
395
+ "epoch": 3.0,
396
+ "eval_entropy": 0.5448623623265777,
397
+ "eval_loss": 0.6060348153114319,
398
+ "eval_mean_token_accuracy": 0.8278159535208414,
399
+ "eval_num_tokens": 2970624.0,
400
+ "eval_runtime": 254.9017,
401
+ "eval_samples_per_second": 0.843,
402
+ "eval_steps_per_second": 0.843,
403
+ "step": 363
404
+ }
405
+ ],
406
+ "logging_steps": 10,
407
+ "max_steps": 363,
408
+ "num_input_tokens_seen": 0,
409
+ "num_train_epochs": 3,
410
+ "save_steps": 500,
411
+ "stateful_callbacks": {
412
+ "TrainerControl": {
413
+ "args": {
414
+ "should_epoch_stop": false,
415
+ "should_evaluate": false,
416
+ "should_log": false,
417
+ "should_save": true,
418
+ "should_training_stop": true
419
+ },
420
+ "attributes": {}
421
+ }
422
+ },
423
+ "total_flos": 4.471321806592082e+17,
424
+ "train_batch_size": 1,
425
+ "trial_name": null,
426
+ "trial_params": null
427
+ }
checkpoint-363/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eaf4e1eba101412810b250e27914b2df87f93b0a9c62028451f50813e692b8e
3
+ size 5713
processor_config.json ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_ms_per_token": 40,
3
+ "audio_seq_length": 750,
4
+ "feature_extractor": {
5
+ "dither": 0.0,
6
+ "feature_extractor_type": "Gemma4AudioFeatureExtractor",
7
+ "feature_size": 128,
8
+ "fft_length": 512,
9
+ "fft_overdrive": false,
10
+ "frame_length": 320,
11
+ "hop_length": 160,
12
+ "input_scale_factor": 1.0,
13
+ "max_frequency": 8000.0,
14
+ "mel_floor": 0.001,
15
+ "min_frequency": 0.0,
16
+ "padding_side": "right",
17
+ "padding_value": 0.0,
18
+ "per_bin_mean": null,
19
+ "per_bin_stddev": null,
20
+ "preemphasis": 0.0,
21
+ "preemphasis_htk_flavor": true,
22
+ "return_attention_mask": true,
23
+ "sampling_rate": 16000
24
+ },
25
+ "image_processor": {
26
+ "do_convert_rgb": true,
27
+ "do_normalize": false,
28
+ "do_rescale": true,
29
+ "do_resize": true,
30
+ "image_mean": [
31
+ 0.0,
32
+ 0.0,
33
+ 0.0
34
+ ],
35
+ "image_processor_type": "Gemma4ImageProcessor",
36
+ "image_seq_length": 280,
37
+ "image_std": [
38
+ 1.0,
39
+ 1.0,
40
+ 1.0
41
+ ],
42
+ "max_soft_tokens": 280,
43
+ "patch_size": 16,
44
+ "pooling_kernel_size": 3,
45
+ "resample": 3,
46
+ "rescale_factor": 0.00392156862745098
47
+ },
48
+ "image_seq_length": 280,
49
+ "processor_class": "Gemma4Processor",
50
+ "video_processor": {
51
+ "do_convert_rgb": true,
52
+ "do_normalize": true,
53
+ "do_rescale": true,
54
+ "do_resize": true,
55
+ "do_sample_frames": true,
56
+ "image_mean": [
57
+ 0.0,
58
+ 0.0,
59
+ 0.0
60
+ ],
61
+ "image_std": [
62
+ 1.0,
63
+ 1.0,
64
+ 1.0
65
+ ],
66
+ "max_soft_tokens": 70,
67
+ "num_frames": 32,
68
+ "patch_size": 16,
69
+ "pooling_kernel_size": 3,
70
+ "resample": 3,
71
+ "rescale_factor": 0.00392156862745098,
72
+ "return_metadata": false,
73
+ "video_processor_type": "Gemma4VideoProcessor"
74
+ }
75
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cc8d3a0ce36466ccc1278bf987df5f71db1719b9ca6b4118264f45cb627bfe0f
3
+ size 32169626
tokenizer_config.json ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "audio_token": "<|audio|>",
3
+ "backend": "tokenizers",
4
+ "boa_token": "<|audio>",
5
+ "boi_token": "<|image>",
6
+ "bos_token": "<bos>",
7
+ "eoa_token": "<audio|>",
8
+ "eoc_token": "<channel|>",
9
+ "eoi_token": "<image|>",
10
+ "eos_token": "<eos>",
11
+ "eot_token": "<turn|>",
12
+ "escape_token": "<|\"|>",
13
+ "etc_token": "<tool_call|>",
14
+ "etd_token": "<tool|>",
15
+ "etr_token": "<tool_response|>",
16
+ "extra_special_tokens": [
17
+ "<|video|>"
18
+ ],
19
+ "image_token": "<|image|>",
20
+ "is_local": false,
21
+ "mask_token": "<mask>",
22
+ "model_max_length": 1000000000000000019884624838656,
23
+ "model_specific_special_tokens": {
24
+ "audio_token": "<|audio|>",
25
+ "boa_token": "<|audio>",
26
+ "boi_token": "<|image>",
27
+ "eoa_token": "<audio|>",
28
+ "eoc_token": "<channel|>",
29
+ "eoi_token": "<image|>",
30
+ "eot_token": "<turn|>",
31
+ "escape_token": "<|\"|>",
32
+ "etc_token": "<tool_call|>",
33
+ "etd_token": "<tool|>",
34
+ "etr_token": "<tool_response|>",
35
+ "image_token": "<|image|>",
36
+ "soc_token": "<|channel>",
37
+ "sot_token": "<|turn>",
38
+ "stc_token": "<|tool_call>",
39
+ "std_token": "<|tool>",
40
+ "str_token": "<|tool_response>",
41
+ "think_token": "<|think|>"
42
+ },
43
+ "pad_token": "<pad>",
44
+ "padding_side": "right",
45
+ "processor_class": "Gemma4Processor",
46
+ "response_schema": {
47
+ "properties": {
48
+ "content": {
49
+ "type": "string"
50
+ },
51
+ "role": {
52
+ "const": "assistant"
53
+ },
54
+ "thinking": {
55
+ "type": "string"
56
+ },
57
+ "tool_calls": {
58
+ "items": {
59
+ "properties": {
60
+ "function": {
61
+ "properties": {
62
+ "arguments": {
63
+ "additionalProperties": {},
64
+ "type": "object",
65
+ "x-parser": "gemma4-tool-call"
66
+ },
67
+ "name": {
68
+ "type": "string"
69
+ }
70
+ },
71
+ "type": "object",
72
+ "x-regex": "call\\:(?P<name>\\w+)(?P<arguments>\\{.*\\})"
73
+ },
74
+ "type": {
75
+ "const": "function"
76
+ }
77
+ },
78
+ "type": "object"
79
+ },
80
+ "type": "array",
81
+ "x-regex-iterator": "<\\|tool_call>(.*?)<tool_call\\|>"
82
+ }
83
+ },
84
+ "type": "object",
85
+ "x-regex": "(\\<\\|channel\\>thought\\n(?P<thinking>.*?)\\<channel\\|\\>)?(?P<content>(?:(?!\\<\\|tool_call\\>)(?!\\<turn\\|\\>).)+)?(?P<tool_calls>\\<\\|tool_call\\>.*\\<tool_call\\|\\>)?(?:\\<turn\\|\\>)?"
86
+ },
87
+ "soc_token": "<|channel>",
88
+ "sot_token": "<|turn>",
89
+ "stc_token": "<|tool_call>",
90
+ "std_token": "<|tool>",
91
+ "str_token": "<|tool_response>",
92
+ "think_token": "<|think|>",
93
+ "tokenizer_class": "GemmaTokenizer",
94
+ "unk_token": "<unk>"
95
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eaf4e1eba101412810b250e27914b2df87f93b0a9c62028451f50813e692b8e
3
+ size 5713