Upload folder using huggingface_hub

- README.md +255 -0
- chat_template.jinja +140 -0
- config.json +69 -0
- generation_config.json +10 -0
- model.safetensors +3 -0
- processor_config.json +67 -0
- tokenizer.json +0 -0
- tokenizer_config.json +49 -0
README.md
ADDED
@@ -0,0 +1,255 @@
---
library_name: transformers
base_model:
- zai-org/GLM-OCR
---

This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [zai-org/GLM-OCR](https://huggingface.co/zai-org/GLM-OCR).

| File path | Size |
|------|------|
| model.safetensors | 3.8MB |

### Example usage:

```python
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

model_id = "tiny-random/glm-ocr"
model = AutoModelForImageTextToText.from_pretrained(
    model_id, dtype=torch.bfloat16, device_map="cuda",
)
processor = AutoProcessor.from_pretrained(model_id)
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Preparation for inference
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt"
).to(model.device)
inputs.pop("token_type_ids", None)
generated_ids = model.generate(**inputs, max_new_tokens=16)
output_text = processor.decode(generated_ids[0], skip_special_tokens=False)
print(output_text)
```
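Since the weights are randomly initialized, the generated text will be gibberish; the point is exercising the code path. To confirm you loaded the tiny debug checkpoint rather than the full model, a quick parameter count (a sketch reusing the `model` object from the snippet above):

```python
# The full GLM-OCR model would be orders of magnitude larger; this tiny
# checkpoint holds roughly 2M parameters (~3.8MB in bfloat16).
n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.2f}M parameters")
```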

### Code to create this repo:

<details>
<summary>Click to expand</summary>

```python
import json
from copy import deepcopy
from pathlib import Path

import accelerate
import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    GlmOcrForConditionalGeneration,
    set_seed,
)

source_model_id = "zai-org/GLM-OCR"
save_folder = "/tmp/tiny-random/glm-ocr"

processor = AutoProcessor.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json: dict = json.load(f)

# Shrink the text tower to debug size.
config_json['text_config'].update({
    "head_dim": 32,
    "hidden_size": 8,
    "intermediate_size": 64,
    "num_attention_heads": 8,
    "num_hidden_layers": 2,
    "num_key_value_heads": 4,
    "rope_parameters": {
        "rope_type": "default",
        "mrope_section": [4, 4, 8],
        "partial_rotary_factor": 1.0,
        "rope_theta": 10000,
    },
})
# Shrink the vision tower to debug size.
config_json['vision_config'].update({
    "hidden_size": 32,
    "depth": 2,
    "num_heads": 1,
    "intermediate_size": 64,
    "out_hidden_size": 8,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = GlmOcrForConditionalGeneration(config)
torch.set_default_dtype(torch.float32)

if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)

model = model.cpu()
set_seed(42)
n_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.numel() / n_params * 100, '%')
# MTP: append a multi-token-prediction layer as an extra ModuleDict
set_seed(42)
config = config.get_text_config()
model.model.language_model.layers.append(nn.ModuleDict(dict(
    shared_head=nn.ModuleDict(dict(
        norm=nn.RMSNorm(config.hidden_size),
        head=deepcopy(model.model.language_model.embed_tokens),
    )),
    embed_tokens=deepcopy(model.model.language_model.embed_tokens),
    eh_proj=nn.Linear(config.hidden_size * 2,
                      config.hidden_size, bias=False),
    enorm=nn.RMSNorm(config.hidden_size),
    hnorm=nn.RMSNorm(config.hidden_size),
    input_layernorm=nn.RMSNorm(config.hidden_size),
    post_attention_layernorm=nn.RMSNorm(config.hidden_size),
    post_self_attn_layernorm=nn.RMSNorm(config.hidden_size),
    self_attn=deepcopy(model.model.language_model.layers[1].self_attn),
    mlp=deepcopy(model.model.language_model.layers[1].mlp),
)))
# for i in range(1, len(model.model.language_model.layers)):
#     model.model.language_model.layers[i].mlp.gate.e_score_correction_bias = torch.rand_like(
#         model.model.language_model.layers[i].mlp.gate.e_score_correction_bias).float()
model.save_pretrained(save_folder)
print(model)
```

</details>
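For context on the appended block: the module names (`enorm`, `hnorm`, `eh_proj`, `shared_head`) follow the DeepSeek-V3-style multi-token-prediction layout, where `eh_proj` fuses the next token's embedding with the current hidden state, hence its `hidden_size * 2` input width. A minimal sketch of that fusion (hypothetical tensor names, not the actual modeling code):

```python
import torch
import torch.nn as nn

hidden_size = 8
eh_proj = nn.Linear(hidden_size * 2, hidden_size, bias=False)

emb = torch.randn(1, 4, hidden_size)  # embeddings of the shifted tokens
hid = torch.randn(1, 4, hidden_size)  # hidden states from the main stack
fused = eh_proj(torch.cat([emb, hid], dim=-1))
print(fused.shape)  # torch.Size([1, 4, 8])
```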

### Printing the model:

<details><summary>Click to expand</summary>

```text
GlmOcrForConditionalGeneration(
  (model): GlmOcrModel(
    (visual): GlmOcrVisionModel(
      (patch_embed): GlmOcrVisionPatchEmbed(
        (proj): Conv3d(3, 32, kernel_size=(2, 14, 14), stride=(2, 14, 14))
      )
      (rotary_pos_emb): GlmOcrVisionRotaryEmbedding()
      (blocks): ModuleList(
        (0-1): 2 x GlmOcrVisionBlock(
          (norm1): GlmOcrRMSNorm((32,), eps=1e-05)
          (norm2): GlmOcrRMSNorm((32,), eps=1e-05)
          (attn): GlmOcrVisionAttention(
            (qkv): Linear(in_features=32, out_features=96, bias=True)
            (proj): Linear(in_features=32, out_features=32, bias=True)
            (q_norm): GlmOcrRMSNorm((32,), eps=1e-05)
            (k_norm): GlmOcrRMSNorm((32,), eps=1e-05)
          )
          (mlp): GlmOcrVisionMlp(
            (gate_proj): Linear(in_features=32, out_features=64, bias=True)
            (up_proj): Linear(in_features=32, out_features=64, bias=True)
            (down_proj): Linear(in_features=64, out_features=32, bias=True)
            (act_fn): SiLUActivation()
          )
        )
      )
      (merger): GlmOcrVisionPatchMerger(
        (proj): Linear(in_features=8, out_features=8, bias=False)
        (post_projection_norm): LayerNorm((8,), eps=1e-05, elementwise_affine=True)
        (gate_proj): Linear(in_features=8, out_features=24, bias=False)
        (up_proj): Linear(in_features=8, out_features=24, bias=False)
        (down_proj): Linear(in_features=24, out_features=8, bias=False)
        (act1): GELU(approximate='none')
        (act_fn): SiLUActivation()
      )
      (downsample): Conv2d(32, 8, kernel_size=(2, 2), stride=(2, 2))
      (post_layernorm): GlmOcrRMSNorm((32,), eps=1e-05)
    )
    (language_model): GlmOcrTextModel(
      (embed_tokens): Embedding(59392, 8, padding_idx=59246)
      (layers): ModuleList(
        (0-1): 2 x GlmOcrTextDecoderLayer(
          (self_attn): GlmOcrTextAttention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (mlp): GlmOcrTextMLP(
            (gate_up_proj): Linear(in_features=8, out_features=128, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (activation_fn): SiLUActivation()
          )
          (input_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_attention_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_self_attn_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_mlp_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
        )
        (2): ModuleDict(
          (shared_head): ModuleDict(
            (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
            (head): Embedding(59392, 8, padding_idx=59246)
          )
          (embed_tokens): Embedding(59392, 8, padding_idx=59246)
          (eh_proj): Linear(in_features=16, out_features=8, bias=False)
          (enorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (hnorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_self_attn_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (self_attn): GlmOcrTextAttention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (mlp): GlmOcrTextMLP(
            (gate_up_proj): Linear(in_features=8, out_features=128, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (activation_fn): SiLUActivation()
          )
        )
      )
      (norm): GlmOcrRMSNorm((8,), eps=1e-05)
      (rotary_emb): GlmOcrTextRotaryEmbedding()
    )
  )
  (lm_head): Linear(in_features=8, out_features=59392, bias=False)
)
```

</details>
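With `hidden_size=8` and a 59392-token vocabulary, nearly all of the parameters sit in the four vocab-sized matrices visible above (`embed_tokens`, `lm_head`, and the MTP block's `embed_tokens` and `shared_head.head`); the transformer blocks themselves are tiny. Back-of-envelope arithmetic from the printed shapes:

```python
vocab, hidden = 59392, 8
vocab_matrices = 4 * vocab * hidden  # embed_tokens, lm_head, MTP embed + head
print(vocab_matrices)                # 1900544 of the ~1.99M total parameters
```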
chat_template.jinja
ADDED
@@ -0,0 +1,140 @@
[gMASK]<sop>
{%- if tools -%}
<|system|>
# Tools

You may call one or more functions to assist with the user query.

You are provided with function signatures within <tools></tools> XML tags:
<tools>
{% for tool in tools %}
{{ tool | tojson(ensure_ascii=False) }}
{% endfor %}
</tools>

For each function call, output the function name and arguments within the following XML format:
<tool_call>{function-name}
<arg_key>{arg-key-1}</arg_key>
<arg_value>{arg-value-1}</arg_value>
<arg_key>{arg-key-2}</arg_key>
<arg_value>{arg-value-2}</arg_value>
...
</tool_call>{%- endif -%}
{%- macro visible_text(content) -%}
{%- if content is string -%}
{{- content }}
{%- elif content is iterable and content is not mapping -%}
{%- for item in content -%}
{%- if item is mapping and item.type == 'text' -%}
{{- item.text }}
{%- elif item is mapping and (item.type == 'image' or 'image' in item) -%}
<|begin_of_image|><|image|><|end_of_image|>
{%- elif item is mapping and (item.type == 'video' or 'video' in item) -%}
<|begin_of_video|><|video|><|end_of_video|>
{%- elif item is string -%}
{{- item }}
{%- endif -%}
{%- endfor -%}
{%- else -%}
{{- content }}
{%- endif -%}
{%- endmacro -%}
{%- set ns = namespace(last_user_index=-1) %}
{%- for m in messages %}
{%- if m.role == 'user' %}
{% set ns.last_user_index = loop.index0 -%}
{%- endif %}
{%- endfor %}
{% for m in messages %}
{%- if m.role == 'user' -%}<|user|>
{% if m.content is string %}
{{ m.content }}
{%- else %}
{%- for item in m.content %}
{% if item.type == 'video' or 'video' in item %}
<|begin_of_video|><|video|><|end_of_video|>{% elif item.type == 'image' or 'image' in item %}
<|begin_of_image|><|image|><|end_of_image|>{% elif item.type == 'text' %}
{{ item.text }}
{%- endif %}
{%- endfor %}
{%- endif %}
{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
{%- elif m.role == 'assistant' -%}
<|assistant|>
{%- set reasoning_content = '' %}
{%- set content = visible_text(m.content) %}
{%- if m.reasoning_content is string %}
{%- set reasoning_content = m.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_user_index and reasoning_content -%}
{{ '\n<think>' + reasoning_content.strip() + '</think>'}}
{%- else -%}
{{ '\n<think></think>' }}
{%- endif -%}
{%- if content.strip() -%}
{{ '\n' + content.strip() }}
{%- endif -%}
{% if m.tool_calls %}
{% for tc in m.tool_calls %}
{%- if tc.function %}
{%- set tc = tc.function %}
{%- endif %}
{{ '\n<tool_call>' + tc.name }}
{% set _args = tc.arguments %}
{% for k, v in _args.items() %}
<arg_key>{{ k }}</arg_key>
<arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
{% endfor %}
</tool_call>{% endfor %}
{% endif %}
{%- elif m.role == 'tool' -%}
{%- if m.content is string -%}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|observation|>' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- m.content }}
{{- '\n</tool_response>' }}
{% elif m.content is iterable and m.content is not mapping %}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|observation|>' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{%- for tr in m.content -%}
{%- if tr is mapping and tr.type is defined -%}
{%- set t = tr.type | lower -%}
{%- if t == 'text' and tr.text is defined -%}
{{ tr.text }}
{%- elif t in ['image', 'image_url'] -%}
<|begin_of_image|><|image|><|end_of_image|>
{%- elif t in ['video', 'video_url'] -%}
<|begin_of_video|><|video|><|end_of_video|>
{%- else -%}
{{ tr | tojson(ensure_ascii=False) }}
{%- endif -%}
{%- else -%}
{{ tr.output if tr.output is defined else tr }}
{%- endif -%}
{%- endfor -%}
{{- '\n</tool_response>' }}
{%- else -%}
<|observation|>{% for tr in m.content %}

<tool_response>
{{ tr.output if tr.output is defined else tr }}
</tool_response>{% endfor -%}
{% endif -%}
{%- elif m.role == 'system' -%}
<|system|>
{{ visible_text(m.content) }}
{%- endif -%}
{%- endfor -%}
{%- if add_generation_prompt -%}
<|assistant|>
{{'<think></think>\n' if (enable_thinking is defined and not enable_thinking) else ''}}
{%- endif -%}
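To see the string this template produces, it can be rendered without tokenization (a sketch; the exact whitespace depends on how the runtime trims the `{%- ... -%}` blocks above):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("tiny-random/glm-ocr")
prompt = processor.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)  # roughly: [gMASK]<sop><|user|>\nHello<|assistant|>
```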
config.json
ADDED
@@ -0,0 +1,69 @@
{
  "architectures": [
    "GlmOcrForConditionalGeneration"
  ],
  "dtype": "bfloat16",
  "image_end_token_id": 59257,
  "image_start_token_id": 59256,
  "image_token_id": 59280,
  "model_type": "glm_ocr",
  "text_config": {
    "attention_bias": false,
    "attention_dropout": 0.0,
    "dtype": "bfloat16",
    "eos_token_id": [
      59246,
      59253
    ],
    "head_dim": 32,
    "hidden_act": "silu",
    "hidden_size": 8,
    "initializer_range": 0.02,
    "intermediate_size": 64,
    "max_position_embeddings": 131072,
    "model_type": "glm_ocr_text",
    "num_attention_heads": 8,
    "num_hidden_layers": 2,
    "num_key_value_heads": 4,
    "num_nextn_predict_layers": 1,
    "pad_token_id": 59246,
    "rms_norm_eps": 1e-05,
    "rope_parameters": {
      "mrope_section": [
        4,
        4,
        8
      ],
      "partial_rotary_factor": 1.0,
      "rope_theta": 10000,
      "rope_type": "default"
    },
    "tie_word_embeddings": false,
    "use_cache": true,
    "vocab_size": 59392
  },
  "tie_word_embeddings": false,
  "transformers_version": "5.2.0.dev0",
  "video_end_token_id": 59259,
  "video_start_token_id": 59258,
  "video_token_id": 59281,
  "vision_config": {
    "attention_bias": true,
    "attention_dropout": 0.0,
    "depth": 2,
    "hidden_act": "silu",
    "hidden_dropout_prob": 0.0,
    "hidden_size": 32,
    "image_size": 336,
    "in_channels": 3,
    "initializer_range": 0.02,
    "intermediate_size": 64,
    "model_type": "glm_ocr_vision",
    "num_heads": 1,
    "out_hidden_size": 8,
    "patch_size": 14,
    "rms_norm_eps": 1e-05,
    "spatial_merge_size": 2,
    "temporal_patch_size": 2
  }
}
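One internal consistency worth noting when shrinking these values: in the M-RoPE scheme used by this model family, `mrope_section` splits the rotary frequency pairs across the temporal/height/width axes, so its entries should sum to `head_dim / 2` (here 4 + 4 + 8 = 16 = 32 / 2, with `partial_rotary_factor` 1.0). A quick check one could run against the file above:

```python
import json

# Load the shrunken config and verify the rotary split matches head_dim.
with open("config.json", encoding="utf-8") as f:
    cfg = json.load(f)

text_cfg = cfg["text_config"]
rope = text_cfg["rope_parameters"]
assert sum(rope["mrope_section"]) == text_cfg["head_dim"] // 2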
generation_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "_from_model_config": true,
  "do_sample": true,
  "eos_token_id": [
    59246,
    59253
  ],
  "pad_token_id": 59246,
  "transformers_version": "5.2.0.dev0"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c1887b001e63419d877771c4e5a93fdb1738b31b16b16fb511dc637e12b9b7a4
size 3977976
processor_config.json
ADDED
@@ -0,0 +1,67 @@
{
  "image_processor": {
    "data_format": "channels_first",
    "do_convert_rgb": true,
    "do_normalize": true,
    "do_rescale": true,
    "do_resize": true,
    "image_mean": [
      0.48145466,
      0.4578275,
      0.40821073
    ],
    "image_processor_type": "Glm46VImageProcessorFast",
    "image_std": [
      0.26862954,
      0.26130258,
      0.27577711
    ],
    "merge_size": 2,
    "patch_size": 14,
    "resample": 3,
    "rescale_factor": 0.00392156862745098,
    "size": {
      "longest_edge": 9633792,
      "shortest_edge": 12544
    },
    "temporal_patch_size": 2
  },
  "processor_class": "Glm46VProcessor",
  "video_processor": {
    "data_format": "channels_first",
    "default_to_square": true,
    "do_convert_rgb": true,
    "do_normalize": true,
    "do_rescale": true,
    "do_resize": true,
    "do_sample_frames": true,
    "fps": 2,
    "image_mean": [
      0.48145466,
      0.4578275,
      0.40821073
    ],
    "image_processor_type": "Glm46VImageProcessor",
    "image_std": [
      0.26862954,
      0.26130258,
      0.27577711
    ],
    "max_duration": 300,
    "max_image_size": {
      "longest_edge": 47040000
    },
    "merge_size": 2,
    "num_frames": 16,
    "patch_size": 14,
    "resample": 3,
    "rescale_factor": 0.00392156862745098,
    "return_metadata": false,
    "size": {
      "longest_edge": 9633792,
      "shortest_edge": 12544
    },
    "temporal_patch_size": 2,
    "video_processor_type": "Glm46VVideoProcessor"
  }
}
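With `patch_size=14` and `merge_size=2`, each 28x28 pixel block of the resized image becomes one `<|image|>` token, and `size` bounds the resized area between 12544 (112x112) and 9633792 pixels. A rough token-count estimate (my simplification of the processor's rounding, not its exact logic):

```python
def approx_image_tokens(height: int, width: int,
                        patch_size: int = 14, merge_size: int = 2) -> int:
    """Patches per axis, merged merge_size x merge_size into one token."""
    return (height // patch_size) * (width // patch_size) // (merge_size ** 2)

print(approx_image_tokens(448, 448))  # 256
```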
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,49 @@
{
  "backend": "tokenizers",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "extra_special_tokens": [
    "<|endoftext|>",
    "[MASK]",
    "[gMASK]",
    "[sMASK]",
    "<sop>",
    "<eop>",
    "<|system|>",
    "<|user|>",
    "<|assistant|>",
    "<|observation|>",
    "<|begin_of_image|>",
    "<|end_of_image|>",
    "<|begin_of_video|>",
    "<|end_of_video|>",
    "<|begin_of_audio|>",
    "<|end_of_audio|>",
    "<|begin_of_transcription|>",
    "<|end_of_transcription|>",
    "<|code_prefix|>",
    "<|code_middle|>",
    "<|code_suffix|>",
    "<think>",
    "</think>",
    "<tool_call>",
    "</tool_call>",
    "<tool_response>",
    "</tool_response>",
    "<arg_key>",
    "</arg_key>",
    "<arg_value>",
    "</arg_value>",
    "/nothink",
    "<|begin_of_box|>",
    "<|end_of_box|>",
    "<|image|>",
    "<|video|>"
  ],
  "is_local": false,
  "model_max_length": 655380,
  "pad_token": "<|endoftext|>",
  "padding_side": "left",
  "processor_class": "Glm46VProcessor",
  "tokenizer_class": "TokenizersBackend"
}
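The `extra_special_tokens` above are exactly the control tokens the chat template emits (`<|user|>`, `<tool_call>`, `<|image|>`, ...), so each should encode to a single id rather than being split by BPE. A quick sanity check (a sketch against this repo's tokenizer):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("tiny-random/glm-ocr")
for t in ["<|user|>", "<|assistant|>", "<|image|>", "<think>", "/nothink"]:
    ids = tok.encode(t, add_special_tokens=False)
    print(t, ids)  # expect a single id per special token
```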