wwhyyyyyy committed (verified)
Commit 682c350 · 1 Parent(s): 4200c3b

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -30,3 +30,7 @@ zzzmmz/SegAgent-Model/model-00004-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
 zzzmmz/SegAgent-Model/model-00002-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
 zzzmmz/SegAgent-Model/model-00001-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
 zzzmmz/SegAgent-Model/model-00003-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+segagent/zzzmmz/SegAgent-Model/model-00001-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+segagent/zzzmmz/SegAgent-Model/model-00002-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+segagent/zzzmmz/SegAgent-Model/model-00003-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
+segagent/zzzmmz/SegAgent-Model/model-00004-of-00004.safetensors filter=lfs diff=lfs merge=lfs -text
segagent/zzzmmz/SegAgent-Model/.mdl ADDED
Binary file (44 Bytes).
 
segagent/zzzmmz/SegAgent-Model/.msc ADDED
Binary file (1.45 kB).
 
segagent/zzzmmz/SegAgent-Model/.mv ADDED
@@ -0,0 +1 @@
+Revision:master,CreatedAt:1754641259
segagent/zzzmmz/SegAgent-Model/README.md ADDED
@@ -0,0 +1,50 @@
+---
+frameworks:
+- Pytorch
+license: Apache License 2.0
+tasks:
+- image-captioning
+
+#model-type:
+##such as gpt, phi, llama, chatglm, baichuan, etc.
+#- gpt
+
+#domain:
+##such as nlp, cv, audio, multi-modal, etc.
+#- nlp
+
+#language:
+##language code list https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
+#- cn
+
+#metrics:
+##such as CIDEr, BLEU, ROUGE, etc.
+#- CIDEr
+
+#tags:
+##various custom tags, including pretrained, fine-tuned, instruction-tuned, RL-tuned, and others
+#- pretrained
+
+#tools:
+##such as vllm, fastchat, llamacpp, AdaSeq, etc.
+#- vllm
+---
+### You are viewing the default Readme template as no detailed model-card was provided by the model’s contributors. You can access the model files in the "Files and versions" tab.
+#### Model files may be downloaded with ModelScope SDK or through git clone directly.
+
+Download with ModelScope’s Python SDK
+```bash
+# Install ModelScope
+pip install modelscope
+```
+```python
+# Download with ModelScope’s Python SDK
+from modelscope import snapshot_download
+model_dir = snapshot_download('zzzmmz/SegAgent-Model')
+```
+Download with Git clone
+```
+git clone https://www.modelscope.cn/zzzmmz/SegAgent-Model.git
+```
+
+<p style="color: lightgrey;">If you are a contributor to this model, we invite you to promptly update the model card content according to <a href="https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88" style="color: lightgrey; text-decoration: underline;">the model contribution documentation</a>.</p>
segagent/zzzmmz/SegAgent-Model/config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "_name_or_path": "/mnt/input/zhumuzhi.zmz/weight/models--Qwen--Qwen-VL-Chat",
+  "architectures": [
+    "QWenLMHeadModel"
+  ],
+  "attn_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_qwen.QWenConfig",
+    "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+  },
+  "bf16": true,
+  "emb_dropout_prob": 0.0,
+  "fp16": false,
+  "fp32": false,
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 22016,
+  "kv_channels": 128,
+  "layer_norm_epsilon": 1e-06,
+  "max_position_embeddings": 8192,
+  "model_type": "qwen",
+  "no_bias": true,
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "onnx_safe": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 1.0,
+  "scale_attn_weights": true,
+  "seq_length": 2048,
+  "tie_word_embeddings": false,
+  "tokenizer_type": "QWenTokenizer",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.37.2",
+  "use_cache": false,
+  "use_dynamic_ntk": true,
+  "use_flash_attn": false,
+  "use_logn_attn": true,
+  "visual": {
+    "heads": 16,
+    "image_size": 448,
+    "image_start_id": 151857,
+    "layers": 48,
+    "mlp_ratio": 4.9231,
+    "output_dim": 4096,
+    "patch_size": 14,
+    "width": 1664
+  },
+  "vocab_size": 151936
+}
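Because `auto_map` routes `AutoConfig`/`AutoModelForCausalLM` to custom code shipped inside the checkpoint, loading through transformers requires `trust_remote_code=True`. A sketch, assuming `model_dir` from the download step and that `modeling_qwen.py` (referenced by `auto_map` but not shown in this diff) is present in the folder:

```python
# Sketch: load the checkpoint via its auto_map entries.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
print(config.hidden_size, config.num_hidden_layers)  # 4096 32

model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    torch_dtype="auto",      # honors "torch_dtype": "bfloat16"
    trust_remote_code=True,  # required: auto_map points at custom code
).eval()
```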
segagent/zzzmmz/SegAgent-Model/configuration.json ADDED
@@ -0,0 +1 @@
+{"framework":"Pytorch","task":"image-captioning"}
segagent/zzzmmz/SegAgent-Model/configuration_qwen.py ADDED
@@ -0,0 +1,65 @@
+# Copyright (c) Alibaba Cloud.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from transformers import PretrainedConfig
+
+
+class QWenConfig(PretrainedConfig):
+    model_type = "qwen"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=151936,
+        hidden_size=4096,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        emb_dropout_prob=0.0,
+        attn_dropout_prob=0.0,
+        layer_norm_epsilon=1e-6,
+        initializer_range=0.02,
+        max_position_embeddings=8192,
+        scale_attn_weights=True,
+        use_cache=True,
+        bf16=False,
+        fp16=False,
+        fp32=False,
+        kv_channels=128,
+        rotary_pct=1.0,
+        rotary_emb_base=10000,
+        use_dynamic_ntk=True,
+        use_logn_attn=True,
+        use_flash_attn="auto",
+        intermediate_size=22016,
+        no_bias=True,
+        tie_word_embeddings=False,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.emb_dropout_prob = emb_dropout_prob
+        self.attn_dropout_prob = attn_dropout_prob
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+        self.scale_attn_weights = scale_attn_weights
+        self.use_cache = use_cache
+        self.max_position_embeddings = max_position_embeddings
+        self.bf16 = bf16
+        self.fp16 = fp16
+        self.fp32 = fp32
+        self.kv_channels = kv_channels
+        self.rotary_pct = rotary_pct
+        self.rotary_emb_base = rotary_emb_base
+        self.use_dynamic_ntk = use_dynamic_ntk
+        self.use_logn_attn = use_logn_attn
+        self.use_flash_attn = use_flash_attn
+        self.no_bias = no_bias
+        super().__init__(
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs
+        )
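`QWenConfig` only records hyperparameters; its defaults match the architecture fields in `config.json` above (runtime flags such as `bf16` and `use_cache` are the ones the JSON overrides). A quick sanity-check sketch:

```python
# Sketch: the defaults encode the same geometry as config.json.
from configuration_qwen import QWenConfig

cfg = QWenConfig()
# per-head dimension = hidden_size / num_attention_heads = kv_channels
assert cfg.hidden_size // cfg.num_attention_heads == cfg.kv_channels == 128
print(cfg.model_type, cfg.vocab_size)  # qwen 151936
```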
segagent/zzzmmz/SegAgent-Model/generation_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "chat_format": "chatml",
+  "do_sample": true,
+  "eos_token_id": 151643,
+  "max_new_tokens": 512,
+  "max_window_size": 6144,
+  "pad_token_id": 151643,
+  "top_k": 0,
+  "top_p": 0.3,
+  "transformers_version": "4.37.2"
+}
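With `top_k` set to 0 (disabled) and `top_p` at 0.3, decoding defaults to nucleus sampling over a narrow head of the distribution, stopping at token 151643. A sketch of how these defaults are picked up, assuming `model_dir` and `model` from the earlier steps:

```python
# Sketch: generation_config.json is applied automatically, but can be inspected.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(model_dir)
assert gen_cfg.do_sample and gen_cfg.top_p == 0.3 and gen_cfg.top_k == 0

# model.generate() uses these defaults unless overridden per call, e.g.:
# out = model.generate(**inputs)                   # top_p=0.3 nucleus sampling
# out = model.generate(**inputs, do_sample=False)  # greedy, ignoring top_p
```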
segagent/zzzmmz/SegAgent-Model/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:470ade4cefc50a771662a41077ea08f0d166c701eacb6a399652ccc383a5f386
+size 4988485656
segagent/zzzmmz/SegAgent-Model/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2ee9a55ff2501ed625d914fa21e16ff6751058d74f96e4d408cb7fc9718f348
+size 4981246520
segagent/zzzmmz/SegAgent-Model/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d665efb246e1fe8a80f2a26e6f4af2256c2a78f94b532486a751d5ee99b70599
+size 4977360088
segagent/zzzmmz/SegAgent-Model/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b975b9b3b9dcbf70e093f81684cf36b3c2e3c1aacf19546e6cb838addaa3446
+size 4366885504
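The four pointer files record the SHA-256 and byte size of each shard, which is enough to verify a download end-to-end. A sketch using the hashes exactly as recorded above, assuming the shards sit directly under `model_dir`:

```python
# Sketch: verify the downloaded shards against the git-lfs pointers above.
import hashlib
import os

EXPECTED = {
    "model-00001-of-00004.safetensors":
        ("470ade4cefc50a771662a41077ea08f0d166c701eacb6a399652ccc383a5f386", 4988485656),
    "model-00002-of-00004.safetensors":
        ("d2ee9a55ff2501ed625d914fa21e16ff6751058d74f96e4d408cb7fc9718f348", 4981246520),
    "model-00003-of-00004.safetensors":
        ("d665efb246e1fe8a80f2a26e6f4af2256c2a78f94b532486a751d5ee99b70599", 4977360088),
    "model-00004-of-00004.safetensors":
        ("8b975b9b3b9dcbf70e093f81684cf36b3c2e3c1aacf19546e6cb838addaa3446", 4366885504),
}

for name, (sha, size) in EXPECTED.items():
    path = os.path.join(model_dir, name)
    assert os.path.getsize(path) == size, f"{name}: size mismatch"
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    assert h.hexdigest() == sha, f"{name}: checksum mismatch"
print("all four shards verified")
```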
segagent/zzzmmz/SegAgent-Model/model.safetensors.index.json ADDED
@@ -0,0 +1,860 @@
+{
+  "metadata": {
+    "total_size": 19313870336
+  },
+  "weight_map": {
+    "lm_head.weight": "model-00004-of-00004.safetensors",
+    "transformer.h.0.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.0.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.0.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.1.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.1.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.10.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.10.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.10.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.11.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.11.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.12.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.12.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.13.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.13.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.14.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.14.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.15.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.15.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.16.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.16.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.17.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.17.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.18.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.18.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.19.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.19.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.2.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.2.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.2.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.20.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.20.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.20.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.attn.c_attn.bias": "model-00002-of-00004.safetensors",
+    "transformer.h.21.attn.c_attn.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.ln_1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.21.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.21.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.22.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.22.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.23.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.23.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.24.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.24.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.25.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.25.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.26.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.26.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.27.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.27.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.28.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.28.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.29.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.29.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.3.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.3.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.3.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.30.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.30.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.30.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.attn.c_attn.bias": "model-00003-of-00004.safetensors",
+    "transformer.h.31.attn.c_attn.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.attn.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.mlp.w1.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.31.mlp.w2.weight": "model-00003-of-00004.safetensors",
+    "transformer.h.4.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.4.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.4.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.5.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.5.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.6.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.6.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.7.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.7.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.8.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.attn.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.ln_2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.mlp.c_proj.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.mlp.w1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.8.mlp.w2.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.9.attn.c_attn.bias": "model-00001-of-00004.safetensors",
+    "transformer.h.9.attn.c_attn.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.9.attn.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.9.ln_1.weight": "model-00001-of-00004.safetensors",
+    "transformer.h.9.ln_2.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.9.mlp.c_proj.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.9.mlp.w1.weight": "model-00002-of-00004.safetensors",
+    "transformer.h.9.mlp.w2.weight": "model-00002-of-00004.safetensors",
+    "transformer.ln_f.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.attn_pool.attn.in_proj_bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.attn.in_proj_weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.kv_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.ln_kv.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.ln_kv.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.ln_q.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.ln_q.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.pos_embed": "model-00004-of-00004.safetensors",
+    "transformer.visual.attn_pool.query": "model-00004-of-00004.safetensors",
+    "transformer.visual.conv1.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.ln_post.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.ln_post.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.ln_pre.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.ln_pre.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.positional_embedding": "model-00003-of-00004.safetensors",
+    "transformer.visual.proj": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.attn.in_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.attn.in_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.attn.out_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.attn.out_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.ln_1.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.ln_2.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.0.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.attn.in_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.attn.in_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.attn.out_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.attn.out_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.ln_1.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.ln_2.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.1.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.10.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.11.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.12.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.13.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.14.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.15.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.16.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.17.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.18.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.19.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.attn.in_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.attn.in_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.attn.out_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.attn.out_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.ln_1.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.ln_1.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.ln_2.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.ln_2.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.2.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.20.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.21.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.22.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.23.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.24.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.25.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.26.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.27.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.28.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.attn.in_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.attn.in_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.attn.out_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.attn.out_proj.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.ln_1.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.ln_1.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.ln_2.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.ln_2.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
+    "transformer.visual.transformer.resblocks.29.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
558
+ "transformer.visual.transformer.resblocks.3.attn.in_proj.bias": "model-00003-of-00004.safetensors",
559
+ "transformer.visual.transformer.resblocks.3.attn.in_proj.weight": "model-00003-of-00004.safetensors",
560
+ "transformer.visual.transformer.resblocks.3.attn.out_proj.bias": "model-00003-of-00004.safetensors",
561
+ "transformer.visual.transformer.resblocks.3.attn.out_proj.weight": "model-00003-of-00004.safetensors",
562
+ "transformer.visual.transformer.resblocks.3.ln_1.bias": "model-00003-of-00004.safetensors",
563
+ "transformer.visual.transformer.resblocks.3.ln_1.weight": "model-00003-of-00004.safetensors",
564
+ "transformer.visual.transformer.resblocks.3.ln_2.bias": "model-00003-of-00004.safetensors",
565
+ "transformer.visual.transformer.resblocks.3.ln_2.weight": "model-00003-of-00004.safetensors",
566
+ "transformer.visual.transformer.resblocks.3.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
567
+ "transformer.visual.transformer.resblocks.3.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
568
+ "transformer.visual.transformer.resblocks.3.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
569
+ "transformer.visual.transformer.resblocks.3.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
570
+ "transformer.visual.transformer.resblocks.30.attn.in_proj.bias": "model-00004-of-00004.safetensors",
571
+ "transformer.visual.transformer.resblocks.30.attn.in_proj.weight": "model-00004-of-00004.safetensors",
572
+ "transformer.visual.transformer.resblocks.30.attn.out_proj.bias": "model-00004-of-00004.safetensors",
573
+ "transformer.visual.transformer.resblocks.30.attn.out_proj.weight": "model-00004-of-00004.safetensors",
574
+ "transformer.visual.transformer.resblocks.30.ln_1.bias": "model-00004-of-00004.safetensors",
575
+ "transformer.visual.transformer.resblocks.30.ln_1.weight": "model-00004-of-00004.safetensors",
576
+ "transformer.visual.transformer.resblocks.30.ln_2.bias": "model-00004-of-00004.safetensors",
577
+ "transformer.visual.transformer.resblocks.30.ln_2.weight": "model-00004-of-00004.safetensors",
578
+ "transformer.visual.transformer.resblocks.30.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
579
+ "transformer.visual.transformer.resblocks.30.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
580
+ "transformer.visual.transformer.resblocks.30.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
581
+ "transformer.visual.transformer.resblocks.30.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
582
+ "transformer.visual.transformer.resblocks.31.attn.in_proj.bias": "model-00004-of-00004.safetensors",
583
+ "transformer.visual.transformer.resblocks.31.attn.in_proj.weight": "model-00004-of-00004.safetensors",
584
+ "transformer.visual.transformer.resblocks.31.attn.out_proj.bias": "model-00004-of-00004.safetensors",
585
+ "transformer.visual.transformer.resblocks.31.attn.out_proj.weight": "model-00004-of-00004.safetensors",
586
+ "transformer.visual.transformer.resblocks.31.ln_1.bias": "model-00004-of-00004.safetensors",
587
+ "transformer.visual.transformer.resblocks.31.ln_1.weight": "model-00004-of-00004.safetensors",
588
+ "transformer.visual.transformer.resblocks.31.ln_2.bias": "model-00004-of-00004.safetensors",
589
+ "transformer.visual.transformer.resblocks.31.ln_2.weight": "model-00004-of-00004.safetensors",
590
+ "transformer.visual.transformer.resblocks.31.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
591
+ "transformer.visual.transformer.resblocks.31.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
592
+ "transformer.visual.transformer.resblocks.31.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
593
+ "transformer.visual.transformer.resblocks.31.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
594
+ "transformer.visual.transformer.resblocks.32.attn.in_proj.bias": "model-00004-of-00004.safetensors",
595
+ "transformer.visual.transformer.resblocks.32.attn.in_proj.weight": "model-00004-of-00004.safetensors",
596
+ "transformer.visual.transformer.resblocks.32.attn.out_proj.bias": "model-00004-of-00004.safetensors",
597
+ "transformer.visual.transformer.resblocks.32.attn.out_proj.weight": "model-00004-of-00004.safetensors",
598
+ "transformer.visual.transformer.resblocks.32.ln_1.bias": "model-00004-of-00004.safetensors",
599
+ "transformer.visual.transformer.resblocks.32.ln_1.weight": "model-00004-of-00004.safetensors",
600
+ "transformer.visual.transformer.resblocks.32.ln_2.bias": "model-00004-of-00004.safetensors",
601
+ "transformer.visual.transformer.resblocks.32.ln_2.weight": "model-00004-of-00004.safetensors",
602
+ "transformer.visual.transformer.resblocks.32.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
603
+ "transformer.visual.transformer.resblocks.32.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
604
+ "transformer.visual.transformer.resblocks.32.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
605
+ "transformer.visual.transformer.resblocks.32.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
606
+ "transformer.visual.transformer.resblocks.33.attn.in_proj.bias": "model-00004-of-00004.safetensors",
607
+ "transformer.visual.transformer.resblocks.33.attn.in_proj.weight": "model-00004-of-00004.safetensors",
608
+ "transformer.visual.transformer.resblocks.33.attn.out_proj.bias": "model-00004-of-00004.safetensors",
609
+ "transformer.visual.transformer.resblocks.33.attn.out_proj.weight": "model-00004-of-00004.safetensors",
610
+ "transformer.visual.transformer.resblocks.33.ln_1.bias": "model-00004-of-00004.safetensors",
611
+ "transformer.visual.transformer.resblocks.33.ln_1.weight": "model-00004-of-00004.safetensors",
612
+ "transformer.visual.transformer.resblocks.33.ln_2.bias": "model-00004-of-00004.safetensors",
613
+ "transformer.visual.transformer.resblocks.33.ln_2.weight": "model-00004-of-00004.safetensors",
614
+ "transformer.visual.transformer.resblocks.33.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
615
+ "transformer.visual.transformer.resblocks.33.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
616
+ "transformer.visual.transformer.resblocks.33.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
617
+ "transformer.visual.transformer.resblocks.33.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
618
+ "transformer.visual.transformer.resblocks.34.attn.in_proj.bias": "model-00004-of-00004.safetensors",
619
+ "transformer.visual.transformer.resblocks.34.attn.in_proj.weight": "model-00004-of-00004.safetensors",
620
+ "transformer.visual.transformer.resblocks.34.attn.out_proj.bias": "model-00004-of-00004.safetensors",
621
+ "transformer.visual.transformer.resblocks.34.attn.out_proj.weight": "model-00004-of-00004.safetensors",
622
+ "transformer.visual.transformer.resblocks.34.ln_1.bias": "model-00004-of-00004.safetensors",
623
+ "transformer.visual.transformer.resblocks.34.ln_1.weight": "model-00004-of-00004.safetensors",
624
+ "transformer.visual.transformer.resblocks.34.ln_2.bias": "model-00004-of-00004.safetensors",
625
+ "transformer.visual.transformer.resblocks.34.ln_2.weight": "model-00004-of-00004.safetensors",
626
+ "transformer.visual.transformer.resblocks.34.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
627
+ "transformer.visual.transformer.resblocks.34.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
628
+ "transformer.visual.transformer.resblocks.34.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
629
+ "transformer.visual.transformer.resblocks.34.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
630
+ "transformer.visual.transformer.resblocks.35.attn.in_proj.bias": "model-00004-of-00004.safetensors",
631
+ "transformer.visual.transformer.resblocks.35.attn.in_proj.weight": "model-00004-of-00004.safetensors",
632
+ "transformer.visual.transformer.resblocks.35.attn.out_proj.bias": "model-00004-of-00004.safetensors",
633
+ "transformer.visual.transformer.resblocks.35.attn.out_proj.weight": "model-00004-of-00004.safetensors",
634
+ "transformer.visual.transformer.resblocks.35.ln_1.bias": "model-00004-of-00004.safetensors",
635
+ "transformer.visual.transformer.resblocks.35.ln_1.weight": "model-00004-of-00004.safetensors",
636
+ "transformer.visual.transformer.resblocks.35.ln_2.bias": "model-00004-of-00004.safetensors",
637
+ "transformer.visual.transformer.resblocks.35.ln_2.weight": "model-00004-of-00004.safetensors",
638
+ "transformer.visual.transformer.resblocks.35.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
639
+ "transformer.visual.transformer.resblocks.35.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
640
+ "transformer.visual.transformer.resblocks.35.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
641
+ "transformer.visual.transformer.resblocks.35.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
642
+ "transformer.visual.transformer.resblocks.36.attn.in_proj.bias": "model-00004-of-00004.safetensors",
643
+ "transformer.visual.transformer.resblocks.36.attn.in_proj.weight": "model-00004-of-00004.safetensors",
644
+ "transformer.visual.transformer.resblocks.36.attn.out_proj.bias": "model-00004-of-00004.safetensors",
645
+ "transformer.visual.transformer.resblocks.36.attn.out_proj.weight": "model-00004-of-00004.safetensors",
646
+ "transformer.visual.transformer.resblocks.36.ln_1.bias": "model-00004-of-00004.safetensors",
647
+ "transformer.visual.transformer.resblocks.36.ln_1.weight": "model-00004-of-00004.safetensors",
648
+ "transformer.visual.transformer.resblocks.36.ln_2.bias": "model-00004-of-00004.safetensors",
649
+ "transformer.visual.transformer.resblocks.36.ln_2.weight": "model-00004-of-00004.safetensors",
650
+ "transformer.visual.transformer.resblocks.36.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
651
+ "transformer.visual.transformer.resblocks.36.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
652
+ "transformer.visual.transformer.resblocks.36.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
653
+ "transformer.visual.transformer.resblocks.36.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
654
+ "transformer.visual.transformer.resblocks.37.attn.in_proj.bias": "model-00004-of-00004.safetensors",
655
+ "transformer.visual.transformer.resblocks.37.attn.in_proj.weight": "model-00004-of-00004.safetensors",
656
+ "transformer.visual.transformer.resblocks.37.attn.out_proj.bias": "model-00004-of-00004.safetensors",
657
+ "transformer.visual.transformer.resblocks.37.attn.out_proj.weight": "model-00004-of-00004.safetensors",
658
+ "transformer.visual.transformer.resblocks.37.ln_1.bias": "model-00004-of-00004.safetensors",
659
+ "transformer.visual.transformer.resblocks.37.ln_1.weight": "model-00004-of-00004.safetensors",
660
+ "transformer.visual.transformer.resblocks.37.ln_2.bias": "model-00004-of-00004.safetensors",
661
+ "transformer.visual.transformer.resblocks.37.ln_2.weight": "model-00004-of-00004.safetensors",
662
+ "transformer.visual.transformer.resblocks.37.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
663
+ "transformer.visual.transformer.resblocks.37.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
664
+ "transformer.visual.transformer.resblocks.37.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
665
+ "transformer.visual.transformer.resblocks.37.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
666
+ "transformer.visual.transformer.resblocks.38.attn.in_proj.bias": "model-00004-of-00004.safetensors",
667
+ "transformer.visual.transformer.resblocks.38.attn.in_proj.weight": "model-00004-of-00004.safetensors",
668
+ "transformer.visual.transformer.resblocks.38.attn.out_proj.bias": "model-00004-of-00004.safetensors",
669
+ "transformer.visual.transformer.resblocks.38.attn.out_proj.weight": "model-00004-of-00004.safetensors",
670
+ "transformer.visual.transformer.resblocks.38.ln_1.bias": "model-00004-of-00004.safetensors",
671
+ "transformer.visual.transformer.resblocks.38.ln_1.weight": "model-00004-of-00004.safetensors",
672
+ "transformer.visual.transformer.resblocks.38.ln_2.bias": "model-00004-of-00004.safetensors",
673
+ "transformer.visual.transformer.resblocks.38.ln_2.weight": "model-00004-of-00004.safetensors",
674
+ "transformer.visual.transformer.resblocks.38.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
675
+ "transformer.visual.transformer.resblocks.38.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
676
+ "transformer.visual.transformer.resblocks.38.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
677
+ "transformer.visual.transformer.resblocks.38.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
678
+ "transformer.visual.transformer.resblocks.39.attn.in_proj.bias": "model-00004-of-00004.safetensors",
679
+ "transformer.visual.transformer.resblocks.39.attn.in_proj.weight": "model-00004-of-00004.safetensors",
680
+ "transformer.visual.transformer.resblocks.39.attn.out_proj.bias": "model-00004-of-00004.safetensors",
681
+ "transformer.visual.transformer.resblocks.39.attn.out_proj.weight": "model-00004-of-00004.safetensors",
682
+ "transformer.visual.transformer.resblocks.39.ln_1.bias": "model-00004-of-00004.safetensors",
683
+ "transformer.visual.transformer.resblocks.39.ln_1.weight": "model-00004-of-00004.safetensors",
684
+ "transformer.visual.transformer.resblocks.39.ln_2.bias": "model-00004-of-00004.safetensors",
685
+ "transformer.visual.transformer.resblocks.39.ln_2.weight": "model-00004-of-00004.safetensors",
686
+ "transformer.visual.transformer.resblocks.39.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
687
+ "transformer.visual.transformer.resblocks.39.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
688
+ "transformer.visual.transformer.resblocks.39.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
689
+ "transformer.visual.transformer.resblocks.39.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
690
+ "transformer.visual.transformer.resblocks.4.attn.in_proj.bias": "model-00003-of-00004.safetensors",
691
+ "transformer.visual.transformer.resblocks.4.attn.in_proj.weight": "model-00003-of-00004.safetensors",
692
+ "transformer.visual.transformer.resblocks.4.attn.out_proj.bias": "model-00003-of-00004.safetensors",
693
+ "transformer.visual.transformer.resblocks.4.attn.out_proj.weight": "model-00003-of-00004.safetensors",
694
+ "transformer.visual.transformer.resblocks.4.ln_1.bias": "model-00003-of-00004.safetensors",
695
+ "transformer.visual.transformer.resblocks.4.ln_1.weight": "model-00003-of-00004.safetensors",
696
+ "transformer.visual.transformer.resblocks.4.ln_2.bias": "model-00003-of-00004.safetensors",
697
+ "transformer.visual.transformer.resblocks.4.ln_2.weight": "model-00003-of-00004.safetensors",
698
+ "transformer.visual.transformer.resblocks.4.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
699
+ "transformer.visual.transformer.resblocks.4.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
700
+ "transformer.visual.transformer.resblocks.4.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
701
+ "transformer.visual.transformer.resblocks.4.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
702
+ "transformer.visual.transformer.resblocks.40.attn.in_proj.bias": "model-00004-of-00004.safetensors",
703
+ "transformer.visual.transformer.resblocks.40.attn.in_proj.weight": "model-00004-of-00004.safetensors",
704
+ "transformer.visual.transformer.resblocks.40.attn.out_proj.bias": "model-00004-of-00004.safetensors",
705
+ "transformer.visual.transformer.resblocks.40.attn.out_proj.weight": "model-00004-of-00004.safetensors",
706
+ "transformer.visual.transformer.resblocks.40.ln_1.bias": "model-00004-of-00004.safetensors",
707
+ "transformer.visual.transformer.resblocks.40.ln_1.weight": "model-00004-of-00004.safetensors",
708
+ "transformer.visual.transformer.resblocks.40.ln_2.bias": "model-00004-of-00004.safetensors",
709
+ "transformer.visual.transformer.resblocks.40.ln_2.weight": "model-00004-of-00004.safetensors",
710
+ "transformer.visual.transformer.resblocks.40.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
711
+ "transformer.visual.transformer.resblocks.40.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
712
+ "transformer.visual.transformer.resblocks.40.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
713
+ "transformer.visual.transformer.resblocks.40.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
714
+ "transformer.visual.transformer.resblocks.41.attn.in_proj.bias": "model-00004-of-00004.safetensors",
715
+ "transformer.visual.transformer.resblocks.41.attn.in_proj.weight": "model-00004-of-00004.safetensors",
716
+ "transformer.visual.transformer.resblocks.41.attn.out_proj.bias": "model-00004-of-00004.safetensors",
717
+ "transformer.visual.transformer.resblocks.41.attn.out_proj.weight": "model-00004-of-00004.safetensors",
718
+ "transformer.visual.transformer.resblocks.41.ln_1.bias": "model-00004-of-00004.safetensors",
719
+ "transformer.visual.transformer.resblocks.41.ln_1.weight": "model-00004-of-00004.safetensors",
720
+ "transformer.visual.transformer.resblocks.41.ln_2.bias": "model-00004-of-00004.safetensors",
721
+ "transformer.visual.transformer.resblocks.41.ln_2.weight": "model-00004-of-00004.safetensors",
722
+ "transformer.visual.transformer.resblocks.41.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
723
+ "transformer.visual.transformer.resblocks.41.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
724
+ "transformer.visual.transformer.resblocks.41.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
725
+ "transformer.visual.transformer.resblocks.41.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
726
+ "transformer.visual.transformer.resblocks.42.attn.in_proj.bias": "model-00004-of-00004.safetensors",
727
+ "transformer.visual.transformer.resblocks.42.attn.in_proj.weight": "model-00004-of-00004.safetensors",
728
+ "transformer.visual.transformer.resblocks.42.attn.out_proj.bias": "model-00004-of-00004.safetensors",
729
+ "transformer.visual.transformer.resblocks.42.attn.out_proj.weight": "model-00004-of-00004.safetensors",
730
+ "transformer.visual.transformer.resblocks.42.ln_1.bias": "model-00004-of-00004.safetensors",
731
+ "transformer.visual.transformer.resblocks.42.ln_1.weight": "model-00004-of-00004.safetensors",
732
+ "transformer.visual.transformer.resblocks.42.ln_2.bias": "model-00004-of-00004.safetensors",
733
+ "transformer.visual.transformer.resblocks.42.ln_2.weight": "model-00004-of-00004.safetensors",
734
+ "transformer.visual.transformer.resblocks.42.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
735
+ "transformer.visual.transformer.resblocks.42.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
736
+ "transformer.visual.transformer.resblocks.42.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
737
+ "transformer.visual.transformer.resblocks.42.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
738
+ "transformer.visual.transformer.resblocks.43.attn.in_proj.bias": "model-00004-of-00004.safetensors",
739
+ "transformer.visual.transformer.resblocks.43.attn.in_proj.weight": "model-00004-of-00004.safetensors",
740
+ "transformer.visual.transformer.resblocks.43.attn.out_proj.bias": "model-00004-of-00004.safetensors",
741
+ "transformer.visual.transformer.resblocks.43.attn.out_proj.weight": "model-00004-of-00004.safetensors",
742
+ "transformer.visual.transformer.resblocks.43.ln_1.bias": "model-00004-of-00004.safetensors",
743
+ "transformer.visual.transformer.resblocks.43.ln_1.weight": "model-00004-of-00004.safetensors",
744
+ "transformer.visual.transformer.resblocks.43.ln_2.bias": "model-00004-of-00004.safetensors",
745
+ "transformer.visual.transformer.resblocks.43.ln_2.weight": "model-00004-of-00004.safetensors",
746
+ "transformer.visual.transformer.resblocks.43.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
747
+ "transformer.visual.transformer.resblocks.43.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
748
+ "transformer.visual.transformer.resblocks.43.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
749
+ "transformer.visual.transformer.resblocks.43.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
750
+ "transformer.visual.transformer.resblocks.44.attn.in_proj.bias": "model-00004-of-00004.safetensors",
751
+ "transformer.visual.transformer.resblocks.44.attn.in_proj.weight": "model-00004-of-00004.safetensors",
752
+ "transformer.visual.transformer.resblocks.44.attn.out_proj.bias": "model-00004-of-00004.safetensors",
753
+ "transformer.visual.transformer.resblocks.44.attn.out_proj.weight": "model-00004-of-00004.safetensors",
754
+ "transformer.visual.transformer.resblocks.44.ln_1.bias": "model-00004-of-00004.safetensors",
755
+ "transformer.visual.transformer.resblocks.44.ln_1.weight": "model-00004-of-00004.safetensors",
756
+ "transformer.visual.transformer.resblocks.44.ln_2.bias": "model-00004-of-00004.safetensors",
757
+ "transformer.visual.transformer.resblocks.44.ln_2.weight": "model-00004-of-00004.safetensors",
758
+ "transformer.visual.transformer.resblocks.44.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
759
+ "transformer.visual.transformer.resblocks.44.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
760
+ "transformer.visual.transformer.resblocks.44.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
761
+ "transformer.visual.transformer.resblocks.44.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
762
+ "transformer.visual.transformer.resblocks.45.attn.in_proj.bias": "model-00004-of-00004.safetensors",
763
+ "transformer.visual.transformer.resblocks.45.attn.in_proj.weight": "model-00004-of-00004.safetensors",
764
+ "transformer.visual.transformer.resblocks.45.attn.out_proj.bias": "model-00004-of-00004.safetensors",
765
+ "transformer.visual.transformer.resblocks.45.attn.out_proj.weight": "model-00004-of-00004.safetensors",
766
+ "transformer.visual.transformer.resblocks.45.ln_1.bias": "model-00004-of-00004.safetensors",
767
+ "transformer.visual.transformer.resblocks.45.ln_1.weight": "model-00004-of-00004.safetensors",
768
+ "transformer.visual.transformer.resblocks.45.ln_2.bias": "model-00004-of-00004.safetensors",
769
+ "transformer.visual.transformer.resblocks.45.ln_2.weight": "model-00004-of-00004.safetensors",
770
+ "transformer.visual.transformer.resblocks.45.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
771
+ "transformer.visual.transformer.resblocks.45.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
772
+ "transformer.visual.transformer.resblocks.45.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
773
+ "transformer.visual.transformer.resblocks.45.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
774
+ "transformer.visual.transformer.resblocks.46.attn.in_proj.bias": "model-00004-of-00004.safetensors",
775
+ "transformer.visual.transformer.resblocks.46.attn.in_proj.weight": "model-00004-of-00004.safetensors",
776
+ "transformer.visual.transformer.resblocks.46.attn.out_proj.bias": "model-00004-of-00004.safetensors",
777
+ "transformer.visual.transformer.resblocks.46.attn.out_proj.weight": "model-00004-of-00004.safetensors",
778
+ "transformer.visual.transformer.resblocks.46.ln_1.bias": "model-00004-of-00004.safetensors",
779
+ "transformer.visual.transformer.resblocks.46.ln_1.weight": "model-00004-of-00004.safetensors",
780
+ "transformer.visual.transformer.resblocks.46.ln_2.bias": "model-00004-of-00004.safetensors",
781
+ "transformer.visual.transformer.resblocks.46.ln_2.weight": "model-00004-of-00004.safetensors",
782
+ "transformer.visual.transformer.resblocks.46.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
783
+ "transformer.visual.transformer.resblocks.46.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
784
+ "transformer.visual.transformer.resblocks.46.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
785
+ "transformer.visual.transformer.resblocks.46.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
786
+ "transformer.visual.transformer.resblocks.47.attn.in_proj.bias": "model-00004-of-00004.safetensors",
787
+ "transformer.visual.transformer.resblocks.47.attn.in_proj.weight": "model-00004-of-00004.safetensors",
788
+ "transformer.visual.transformer.resblocks.47.attn.out_proj.bias": "model-00004-of-00004.safetensors",
789
+ "transformer.visual.transformer.resblocks.47.attn.out_proj.weight": "model-00004-of-00004.safetensors",
790
+ "transformer.visual.transformer.resblocks.47.ln_1.bias": "model-00004-of-00004.safetensors",
791
+ "transformer.visual.transformer.resblocks.47.ln_1.weight": "model-00004-of-00004.safetensors",
792
+ "transformer.visual.transformer.resblocks.47.ln_2.bias": "model-00004-of-00004.safetensors",
793
+ "transformer.visual.transformer.resblocks.47.ln_2.weight": "model-00004-of-00004.safetensors",
794
+ "transformer.visual.transformer.resblocks.47.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
795
+ "transformer.visual.transformer.resblocks.47.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
796
+ "transformer.visual.transformer.resblocks.47.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
797
+ "transformer.visual.transformer.resblocks.47.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
798
+ "transformer.visual.transformer.resblocks.5.attn.in_proj.bias": "model-00003-of-00004.safetensors",
799
+ "transformer.visual.transformer.resblocks.5.attn.in_proj.weight": "model-00003-of-00004.safetensors",
800
+ "transformer.visual.transformer.resblocks.5.attn.out_proj.bias": "model-00003-of-00004.safetensors",
801
+ "transformer.visual.transformer.resblocks.5.attn.out_proj.weight": "model-00003-of-00004.safetensors",
802
+ "transformer.visual.transformer.resblocks.5.ln_1.bias": "model-00003-of-00004.safetensors",
803
+ "transformer.visual.transformer.resblocks.5.ln_1.weight": "model-00003-of-00004.safetensors",
804
+ "transformer.visual.transformer.resblocks.5.ln_2.bias": "model-00003-of-00004.safetensors",
805
+ "transformer.visual.transformer.resblocks.5.ln_2.weight": "model-00003-of-00004.safetensors",
806
+ "transformer.visual.transformer.resblocks.5.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
807
+ "transformer.visual.transformer.resblocks.5.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
808
+ "transformer.visual.transformer.resblocks.5.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
809
+ "transformer.visual.transformer.resblocks.5.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
810
+ "transformer.visual.transformer.resblocks.6.attn.in_proj.bias": "model-00003-of-00004.safetensors",
811
+ "transformer.visual.transformer.resblocks.6.attn.in_proj.weight": "model-00003-of-00004.safetensors",
812
+ "transformer.visual.transformer.resblocks.6.attn.out_proj.bias": "model-00003-of-00004.safetensors",
813
+ "transformer.visual.transformer.resblocks.6.attn.out_proj.weight": "model-00003-of-00004.safetensors",
814
+ "transformer.visual.transformer.resblocks.6.ln_1.bias": "model-00003-of-00004.safetensors",
815
+ "transformer.visual.transformer.resblocks.6.ln_1.weight": "model-00003-of-00004.safetensors",
816
+ "transformer.visual.transformer.resblocks.6.ln_2.bias": "model-00003-of-00004.safetensors",
817
+ "transformer.visual.transformer.resblocks.6.ln_2.weight": "model-00003-of-00004.safetensors",
818
+ "transformer.visual.transformer.resblocks.6.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
819
+ "transformer.visual.transformer.resblocks.6.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
820
+ "transformer.visual.transformer.resblocks.6.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
821
+ "transformer.visual.transformer.resblocks.6.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
822
+ "transformer.visual.transformer.resblocks.7.attn.in_proj.bias": "model-00003-of-00004.safetensors",
823
+ "transformer.visual.transformer.resblocks.7.attn.in_proj.weight": "model-00003-of-00004.safetensors",
824
+ "transformer.visual.transformer.resblocks.7.attn.out_proj.bias": "model-00003-of-00004.safetensors",
825
+ "transformer.visual.transformer.resblocks.7.attn.out_proj.weight": "model-00003-of-00004.safetensors",
826
+ "transformer.visual.transformer.resblocks.7.ln_1.bias": "model-00003-of-00004.safetensors",
827
+ "transformer.visual.transformer.resblocks.7.ln_1.weight": "model-00003-of-00004.safetensors",
828
+ "transformer.visual.transformer.resblocks.7.ln_2.bias": "model-00003-of-00004.safetensors",
829
+ "transformer.visual.transformer.resblocks.7.ln_2.weight": "model-00003-of-00004.safetensors",
830
+ "transformer.visual.transformer.resblocks.7.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
831
+ "transformer.visual.transformer.resblocks.7.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
832
+ "transformer.visual.transformer.resblocks.7.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
833
+ "transformer.visual.transformer.resblocks.7.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
834
+ "transformer.visual.transformer.resblocks.8.attn.in_proj.bias": "model-00003-of-00004.safetensors",
835
+ "transformer.visual.transformer.resblocks.8.attn.in_proj.weight": "model-00003-of-00004.safetensors",
836
+ "transformer.visual.transformer.resblocks.8.attn.out_proj.bias": "model-00003-of-00004.safetensors",
837
+ "transformer.visual.transformer.resblocks.8.attn.out_proj.weight": "model-00003-of-00004.safetensors",
838
+ "transformer.visual.transformer.resblocks.8.ln_1.bias": "model-00003-of-00004.safetensors",
839
+ "transformer.visual.transformer.resblocks.8.ln_1.weight": "model-00003-of-00004.safetensors",
840
+ "transformer.visual.transformer.resblocks.8.ln_2.bias": "model-00003-of-00004.safetensors",
841
+ "transformer.visual.transformer.resblocks.8.ln_2.weight": "model-00003-of-00004.safetensors",
842
+ "transformer.visual.transformer.resblocks.8.mlp.c_fc.bias": "model-00003-of-00004.safetensors",
843
+ "transformer.visual.transformer.resblocks.8.mlp.c_fc.weight": "model-00003-of-00004.safetensors",
844
+ "transformer.visual.transformer.resblocks.8.mlp.c_proj.bias": "model-00003-of-00004.safetensors",
845
+ "transformer.visual.transformer.resblocks.8.mlp.c_proj.weight": "model-00003-of-00004.safetensors",
846
+ "transformer.visual.transformer.resblocks.9.attn.in_proj.bias": "model-00003-of-00004.safetensors",
847
+ "transformer.visual.transformer.resblocks.9.attn.in_proj.weight": "model-00003-of-00004.safetensors",
848
+ "transformer.visual.transformer.resblocks.9.attn.out_proj.bias": "model-00003-of-00004.safetensors",
849
+ "transformer.visual.transformer.resblocks.9.attn.out_proj.weight": "model-00003-of-00004.safetensors",
850
+ "transformer.visual.transformer.resblocks.9.ln_1.bias": "model-00003-of-00004.safetensors",
851
+ "transformer.visual.transformer.resblocks.9.ln_1.weight": "model-00003-of-00004.safetensors",
852
+ "transformer.visual.transformer.resblocks.9.ln_2.bias": "model-00003-of-00004.safetensors",
853
+ "transformer.visual.transformer.resblocks.9.ln_2.weight": "model-00003-of-00004.safetensors",
854
+ "transformer.visual.transformer.resblocks.9.mlp.c_fc.bias": "model-00004-of-00004.safetensors",
855
+ "transformer.visual.transformer.resblocks.9.mlp.c_fc.weight": "model-00004-of-00004.safetensors",
856
+ "transformer.visual.transformer.resblocks.9.mlp.c_proj.bias": "model-00004-of-00004.safetensors",
857
+ "transformer.visual.transformer.resblocks.9.mlp.c_proj.weight": "model-00004-of-00004.safetensors",
858
+ "transformer.wte.weight": "model-00001-of-00004.safetensors"
859
+ }
860
+ }
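
The `weight_map` above is the contract between parameter names and shard files: a loader reads this small index first, then opens only the shards it actually needs. A minimal sketch of resolving a single tensor through the index (the file paths are placeholders for a downloaded snapshot, not part of this commit):

```python
# Illustrative only: look a tensor up in the shard index, then read just
# that tensor from the shard file that holds it.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "transformer.wte.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"
with safe_open(shard, framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)  # reads only this tensor's bytes
print(tensor.shape)
```
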
segagent/zzzmmz/SegAgent-Model/modeling_qwen.py ADDED
@@ -0,0 +1,1172 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import importlib
+ import math
+ from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch.cuda.amp import autocast
+
+ from torch.nn import CrossEntropyLoss
+ from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
+ from transformers.generation.logits_process import LogitsProcessorList
+
+ if TYPE_CHECKING:
+     from transformers.generation.streamers import BaseStreamer
+ from transformers.generation.utils import GenerateOutput
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ try:
+     from einops import rearrange
+ except ImportError:
+     rearrange = None
+ from torch import nn
+
+ SUPPORT_CUDA = torch.cuda.is_available()
+ SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
+ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
+
+ from .configuration_qwen import QWenConfig
+ from .qwen_generation_utils import (
+     HistoryType,
+     make_context,
+     decode_tokens,
+     get_stop_words_ids,
+     StopWordsLogitsProcessor,
+ )
+ from .visual import VisionTransformer
+
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "qwen"
+ _CONFIG_FOR_DOC = "QWenConfig"
+
+ QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
+
+ _ERROR_BAD_CHAT_FORMAT = """\
+ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
+ If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
+ 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
+ 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
+ """
+
+ _SENTINEL = object()
+ _ERROR_STREAM_IN_CHAT = """\
+ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
+ 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
+ """
+
+ apply_rotary_emb_func = None
+ rms_norm = None
+ def int_list_to_str(lst: List[int]) -> str:
+     """Convert a list of integer code points back into a string."""
+     return ''.join(chr(i) for i in lst)
+
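
`int_list_to_str` is the inverse of a plain `ord()` encoding; the model uses it further down in `forward()` to recover the mask string that was padded into `masks_ids`. A quick illustration of the round trip (the file name is hypothetical):

```python
# A string travels through an integer tensor as Unicode code points.
s = "refcoco_mask_0001.png"        # hypothetical mask identifier
ids = [ord(c) for c in s]          # encode: str -> List[int]
assert int_list_to_str(ids) == s   # decode restores the original string
```
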
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
+ def _make_causal_mask(
+     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+ ):
+     """
+     Make causal mask used for bi-directional self-attention.
+     """
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
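
`_make_causal_mask` returns an additive mask: 0 where a query may attend and the dtype minimum where it may not, so adding it to the attention logits before the softmax suppresses future positions. An illustrative check of the layout for a 3-token prompt with no cache:

```python
# Sketch only: uses the _make_causal_mask helper defined above.
import torch

m = _make_causal_mask(torch.Size([1, 3]), torch.float32, torch.device("cpu"))
assert m.shape == (1, 1, 3, 3)
lower = torch.tril(torch.ones(3, 3, dtype=torch.bool))
assert (m[0, 0] == 0).equal(lower)   # attend on/below the diagonal only
assert (m[0, 0] < 0)[~lower].all()   # dtype minimum above the diagonal
```
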
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+     """
+     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+     """
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+     inverted_mask = 1.0 - expanded_mask
+
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ class QWenAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+
+         self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
+         self.seq_length = config.seq_length
+
+         self.hidden_size = config.hidden_size
+         self.split_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+
+         self.scale_attn_weights = True
+
+         self.projection_size = config.kv_channels * config.num_attention_heads
+
+         assert self.projection_size % config.num_attention_heads == 0
+         self.hidden_size_per_attention_head = (
+             self.projection_size // config.num_attention_heads
+         )
+
+         self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
+
+         self.c_proj = nn.Linear(
+             config.hidden_size, self.projection_size, bias=not config.no_bias
+         )
+
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         self.bf16 = config.bf16
+
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.use_logn_attn = config.use_logn_attn
+
+         logn_list = [
+             math.log(i, self.seq_length) if i > self.seq_length else 1
+             for i in range(1, 32768)
+         ]
+         self.logn_tensor = torch.tensor(logn_list)[None, :, None, None]
+
+         self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
+
+     def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
+         attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+         if self.scale_attn_weights:
+             attn_weights = attn_weights / torch.full(
+                 [],
+                 value.size(-1) ** 0.5,
+                 dtype=attn_weights.dtype,
+                 device=attn_weights.device,
+             )
+
+         query_length, key_length = query.size(-2), key.size(-2)
+         # causal_mask = self.bias[
+         #     :, :, key_length - query_length : key_length, :key_length
+         # ]
+         # mask_value = torch.finfo(attn_weights.dtype).min
+         # mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
+         #     attn_weights.device
+         # )
+         # attn_weights = torch.where(
+         #     causal_mask, attn_weights.to(attn_weights.dtype), mask_value
+         # )
+         attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+         attn_output = attn_output.transpose(1, 2)
+
+         return attn_output, attn_weights
+
+     def _upcast_and_reordered_attn(
+         self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
+     ):
+         bsz, num_heads, q_seq_len, dk = query.size()
+         _, _, k_seq_len, _ = key.size()
+
+         attn_weights = torch.empty(
+             bsz * num_heads,
+             q_seq_len,
+             k_seq_len,
+             dtype=torch.float32,
+             device=query.device,
+         )
+
+         scale_factor = 1.0
+         if self.scale_attn_weights:
+             scale_factor /= float(value.size(-1)) ** 0.5
+
+         with autocast(enabled=False):
+             q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
+                 -1, dk, k_seq_len
+             )
+             attn_weights = torch.baddbmm(
+                 attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
+             )
+             attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
+
+         query_length, key_length = query.size(-2), key.size(-2)
+         causal_mask = registered_causal_mask[
+             :, :, key_length - query_length : key_length, :key_length
+         ]
+         mask_value = torch.finfo(attn_weights.dtype).min
+         mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
+             attn_weights.device
+         )
+         attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+         if attn_weights.dtype != torch.float32:
+             raise RuntimeError(
+                 "Error with upcasting, attn_weights does not have dtype torch.float32"
+             )
+         attn_weights = attn_weights.type(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     def _split_heads(self, tensor, num_heads, attn_head_size):
+         new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+         tensor = tensor.view(new_shape)
+         return tensor
+
+     def _merge_heads(self, tensor, num_heads, attn_head_size):
+         tensor = tensor.contiguous()
+         new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb: Optional[List[torch.Tensor]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+     ):
+
+         mixed_x_layer = self.c_attn(hidden_states)
+
+         query, key, value = mixed_x_layer.split(self.split_size, dim=2)
+
+         query = self._split_heads(query, self.num_heads, self.head_dim)
+         key = self._split_heads(key, self.num_heads, self.head_dim)
+         value = self._split_heads(value, self.num_heads, self.head_dim)
+
+         if rotary_pos_emb is not None:
+             cur_len = query.shape[1]
+             rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
+             rotary_pos_emb = (rotary_pos_emb,) * 2
+             q_pos_emb, k_pos_emb = rotary_pos_emb
+             # Slice the pos emb for current inference
+             query = apply_rotary_pos_emb(query, q_pos_emb)
+             key = apply_rotary_pos_emb(key, k_pos_emb)
+
+         if layer_past is not None:
+             past_key, past_value = layer_past[0], layer_past[1]
+             key = torch.cat((past_key, key), dim=1)
+             value = torch.cat((past_value, value), dim=1)
+
+         if use_cache:
+             present = (key, value)
+         else:
+             present = None
+
+         if self.use_logn_attn and not self.training:
+             if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype:
+                 self.logn_tensor = self.logn_tensor.to(query.device).type_as(query)
+             seq_start = key.size(1) - query.size(1)
+             seq_end = key.size(1)
+             logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
+             query = query * logn_tensor.expand_as(query)
+
+         query = query.permute(0, 2, 1, 3)
+         key = key.permute(0, 2, 1, 3)
+         value = value.permute(0, 2, 1, 3)
+         attn_output, attn_weight = self._attn(
+             query, key, value, registered_causal_mask, attention_mask, head_mask
+         )
+         context_layer = self._merge_heads(
+             attn_output, self.num_heads, self.head_dim
+         )
+
+         attn_output = self.c_proj(context_layer)
+
+         outputs = (attn_output, present)
+         if output_attentions:
+             outputs += (attn_weight,)
+
+         return outputs
+
+
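
The `use_logn_attn` branch above rescales queries at inference time so positions beyond the trained context get progressively larger attention logits: position i is multiplied by log base `seq_length` of i once i exceeds `seq_length`. A small numeric check mirroring the `logn_list` comprehension (the 2048-token context length is an assumed example, not read from this config):

```python
import math

seq_length = 2048                                  # assumed training context
factors = [math.log(i, seq_length) if i > seq_length else 1.0
           for i in range(1, 32768)]
assert factors[seq_length - 1] == 1.0              # position 2048: unscaled
assert abs(factors[2 * seq_length - 1]
           - math.log(2 * seq_length, seq_length)) < 1e-9  # ~1.09 at 4096
```
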
+ class QWenMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.w1 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         self.w2 = nn.Linear(
+             config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
+         )
+         ff_dim_in = config.intermediate_size // 2
+         self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
+
+     def forward(self, hidden_states):
+         a1 = self.w1(hidden_states)
+         a2 = self.w2(hidden_states)
+         intermediate_parallel = a1 * F.silu(a2)
+         output = self.c_proj(intermediate_parallel)
+         return output
+
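
`QWenMLP` is a SwiGLU feed-forward: one linear branch gates the other through SiLU before the down-projection, which is why `intermediate_size` is halved for each branch. A functional restatement with throwaway dimensions:

```python
# Same dataflow as QWenMLP.forward, toy sizes only.
import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 32)                    # (batch, seq, hidden)
w1 = torch.nn.Linear(32, 64, bias=False)     # "a1" branch
w2 = torch.nn.Linear(32, 64, bias=False)     # gated branch (through SiLU)
c_proj = torch.nn.Linear(64, 32, bias=False)

y = c_proj(w1(x) * F.silu(w2(x)))            # a1 * silu(a2), then project
assert y.shape == x.shape
```
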
+ class QWenBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         hidden_size = config.hidden_size
+         self.bf16 = config.bf16
+
+         self.ln_1 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+         self.attn = QWenAttention(config)
+         self.ln_2 = RMSNorm(
+             hidden_size,
+             eps=config.layer_norm_epsilon,
+         )
+
+         self.mlp = QWenMLP(config)
+
+     def forward(
+         self,
+         hidden_states: Optional[Tuple[torch.FloatTensor]],
+         rotary_pos_emb: Optional[List[torch.Tensor]] = None,
+         registered_causal_mask: Optional[torch.Tensor] = None,
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         encoder_hidden_states: Optional[torch.Tensor] = None,
+         encoder_attention_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+     ):
+         layernorm_output = self.ln_1(hidden_states)
+
+         attn_outputs = self.attn(
+             layernorm_output,
+             rotary_pos_emb,
+             registered_causal_mask=registered_causal_mask,
+             layer_past=layer_past,
+             attention_mask=attention_mask,
+             head_mask=head_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+         )
+         attn_output = attn_outputs[0]
+
+         outputs = attn_outputs[1:]
+
+         residual = hidden_states
+         layernorm_input = attn_output + residual
+
+         layernorm_output = self.ln_2(layernorm_input)
+
+         residual = layernorm_input
+         mlp_output = self.mlp(layernorm_output)
+         hidden_states = residual + mlp_output
+
+         if use_cache:
+             outputs = (hidden_states,) + outputs
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         return outputs
+
+
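
Stripped of caches, masks and attention outputs, `QWenBlock.forward` is the standard pre-norm residual wiring, normalizing before each sublayer rather than after:

```python
# Condensed, illustrative view of the block above.
def qwen_block(x, attn, mlp, ln_1, ln_2):
    x = x + attn(ln_1(x))  # attention sublayer + residual
    x = x + mlp(ln_2(x))   # MLP sublayer + residual
    return x
```
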
+ class QWenPreTrainedModel(PreTrainedModel):
+     config_class = QWenConfig
+     base_model_prefix = "transformer"
+     is_parallelizable = False
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["QWenBlock"]
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, RMSNorm):
+             module.weight.data.fill_(1.0)
+
+         for name, p in module.named_parameters():
+             if name == "c_proj.weight":
+                 p.data.normal_(
+                     mean=0.0,
+                     std=(
+                         self.config.initializer_range
+                         / math.sqrt(2 * self.config.num_hidden_layers)
+                     ),
+                 )
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, QWenModel):
+             module.gradient_checkpointing = value
+
+
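
The `c_proj.weight` special case in `_init_weights` is GPT-2-style depth scaling: every residual-branch output projection is drawn with a standard deviation shrunk by sqrt(2 · num_hidden_layers), so summing many residual branches does not blow up activations. With illustrative config values:

```python
# Hypothetical numbers; the real values come from the model's config.json.
import math

initializer_range = 0.02
num_hidden_layers = 32
std = initializer_range / math.sqrt(2 * num_hidden_layers)
assert abs(std - 0.0025) < 1e-12   # 0.02 / sqrt(64)
```
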
+ class QWenModel(QWenPreTrainedModel):
+     _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.vocab_size = config.vocab_size
+         self.num_hidden_layers = config.num_hidden_layers
+         self.embed_dim = config.hidden_size
+
+         self.gradient_checkpointing = False
+         self.use_dynamic_ntk = config.use_dynamic_ntk
+         self.seq_length = config.seq_length
+
+         self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
+
+         self.drop = nn.Dropout(config.emb_dropout_prob)
+
+         if config.rotary_pct == 1.0:
+             self.rotary_ndims = None
+         else:
+             assert config.rotary_pct < 1
+             self.rotary_ndims = int(
+                 config.kv_channels * config.rotary_pct
+             )
+         dim = (
+             self.rotary_ndims
+             if self.rotary_ndims is not None
+             else config.kv_channels
+         )
+         self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
+
+         self.use_flash_attn = config.use_flash_attn
+         self.is_fp32 = not (config.bf16 or config.fp16)
+         self.registered_causal_mask = None
+         # if (
+         #     self.use_flash_attn
+         #     and flash_attn_unpadded_func is not None
+         #     and not self.is_fp32
+         # ):
+         #     self.registered_causal_mask = None
+         # else:
+         #     max_positions = config.max_position_embeddings
+         #     self.register_buffer(
+         #         "registered_causal_mask",
+         #         torch.tril(
+         #             torch.ones((max_positions, max_positions), dtype=torch.bool)
+         #         ).view(1, 1, max_positions, max_positions),
+         #         persistent=False,
+         #     )
+
+         self.h = nn.ModuleList(
+             [
+                 QWenBlock(
+                     config
+                 )
+                 for i in range(config.num_hidden_layers)
+             ]
+         )
+         self.ln_f = RMSNorm(
+             self.embed_dim,
+             eps=config.layer_norm_epsilon,
+         )
+
+         self.visual = VisionTransformer(**config.visual)
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings):
+         self.wte = new_embeddings
+
+     # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
+     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
+         # create causal mask
+         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+         combined_attention_mask = None
+         if input_shape[-1] > 1:
+             combined_attention_mask = _make_causal_mask(
+                 input_shape,
+                 inputs_embeds.dtype,
+                 device=inputs_embeds.device,
+                 past_key_values_length=past_key_values_length,
+             )
+
+         if attention_mask is not None:
+             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                 inputs_embeds.device
+             )
+             combined_attention_mask = (
+                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+             )
+
+         return combined_attention_mask
+
+
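
`_prepare_decoder_attention_mask` simply adds the two additive masks, so a key position stays visible only if it is both not in the future and not padding. A small composition check using the module-level helpers defined earlier in this file:

```python
# Illustrative: 3 tokens, the last one padding.
import torch

causal = _make_causal_mask(torch.Size([1, 3]), torch.float32,
                           torch.device("cpu"))               # [1, 1, 3, 3]
padding = _expand_mask(torch.tensor([[1, 1, 0]]),
                       torch.float32, tgt_len=3)              # [1, 1, 3, 3]
combined = causal + padding        # same sum the method returns
assert (combined[0, 0, :, 2] < 0).all()  # padded key blocked for every query
```
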
+ def forward(
541
+ self,
542
+ input_ids: Optional[torch.LongTensor] = None,
543
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
544
+ attention_mask: Optional[torch.FloatTensor] = None,
545
+ token_type_ids: Optional[torch.LongTensor] = None,
546
+ position_ids: Optional[torch.LongTensor] = None,
547
+ head_mask: Optional[torch.FloatTensor] = None,
548
+ inputs_embeds: Optional[torch.FloatTensor] = None,
549
+ encoder_hidden_states: Optional[torch.Tensor] = None,
550
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
551
+ use_cache: Optional[bool] = None,
552
+ output_attentions: Optional[bool] = None,
553
+ output_hidden_states: Optional[bool] = None,
554
+ return_dict: Optional[bool] = None,
555
+ masks_ids: Optional[torch.LongTensor] = None,
556
+ ):
557
+ if past_key_values is None and torch.any(input_ids == self.config.visual['image_start_id']):
558
+ bos_pos = torch.where(input_ids == self.config.visual['image_start_id'])
559
+ eos_pos = torch.where(input_ids == self.config.visual['image_start_id'] + 1)
560
+ assert (bos_pos[0] == eos_pos[0]).all()
561
+ img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1)
562
+ images = []
563
+ masks = []
564
+ for i, a, b in img_pos:
565
+ image = input_ids[i][a + 1 : b - 1].tolist()
566
+ image = image[ : image.index(self.config.visual['image_start_id'] + 2)]
567
+ images.append(bytes(image).decode('utf-8'))
568
+ if masks_ids is not None:
569
+ mask = int_list_to_str(masks_ids[i][masks_ids[i] != -1].tolist())
570
+ masks.append(mask)
571
+ else:
572
+ masks.append('')
573
+ images = self.visual.encode(images, masks)
574
+ assert images.shape[0] == len(img_pos)  # one embedding batch entry per <img>...</img> span
575
+ fake_images = None
576
+ elif self.training:
577
+ fake_images = torch.zeros(1, 3, 224, 224).to(
578
+ dtype=self.visual.conv1.weight.dtype, device=self.visual.conv1.weight.device)
579
+ images = self.visual(fake_images)
580
+ else:
581
+ fake_images = None
582
+ images = None
583
+
584
+ output_attentions = (
585
+ output_attentions
586
+ if output_attentions is not None
587
+ else self.config.output_attentions
588
+ )
589
+ output_hidden_states = (
590
+ output_hidden_states
591
+ if output_hidden_states is not None
592
+ else self.config.output_hidden_states
593
+ )
594
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
595
+ return_dict = (
596
+ return_dict if return_dict is not None else self.config.use_return_dict
597
+ )
598
+
599
+ if input_ids is not None and inputs_embeds is not None:
600
+ raise ValueError(
601
+ "You cannot specify both input_ids and inputs_embeds at the same time"
602
+ )
603
+ elif input_ids is not None:
604
+ input_shape = input_ids.size()
605
+ input_ids = input_ids.view(-1, input_shape[-1])
606
+ batch_size = input_ids.shape[0]
607
+ elif inputs_embeds is not None:
608
+ input_shape = inputs_embeds.size()[:-1]
609
+ batch_size = inputs_embeds.shape[0]
610
+ else:
611
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
612
+
613
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
614
+
615
+ if token_type_ids is not None:
616
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
617
+ if position_ids is not None:
618
+ position_ids = position_ids.view(-1, input_shape[-1])
619
+
620
+ if past_key_values is None:
621
+ past_length = 0
622
+ past_key_values = tuple([None] * len(self.h))
623
+ else:
624
+ past_length = past_key_values[0][0].size(-2)
625
+
626
+ if position_ids is None:
627
+ position_ids = torch.arange(
628
+ past_length,
629
+ input_shape[-1] + past_length,
630
+ dtype=torch.long,
631
+ device=device,
632
+ )
633
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
634
+
635
+ encoder_attention_mask = None
636
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
637
+
638
+ if inputs_embeds is None:
639
+ inputs_embeds = self.wte(input_ids)
640
+
641
+ if batch_size <= 0:
642
+ raise ValueError("batch_size has to be defined and > 0")
643
+ attention_mask = self._prepare_decoder_attention_mask(
644
+ attention_mask, input_shape, inputs_embeds, past_length
645
+ )
646
+
647
+ hidden_states = inputs_embeds
648
+
649
+ kv_seq_len = hidden_states.size()[1]
650
+ if past_key_values[0] is not None:
651
+ # past key values[0][0] shape: bs * seq_len * head_num * dim
652
+ kv_seq_len += past_key_values[0][0].shape[1]
653
+ if (
654
+ self.use_dynamic_ntk
655
+ and kv_seq_len == hidden_states.size()[1]
656
+ and not self.training
657
+ ):
658
+ context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
659
+ ntk_alpha = 2 ** math.ceil(context_value) - 1
660
+ ntk_alpha = max(ntk_alpha, 1)
661
+ else:
662
+ ntk_alpha = self.rotary_emb._ntk_alpha_cached
663
+
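+ # Worked example of the dynamic-NTK branch above (illustrative numbers):
+ # with config.seq_length = 2048 and kv_seq_len = 4096, context_value =
+ # log2(4096 / 2048) + 1 = 2, so ntk_alpha = 2**2 - 1 = 3, which stretches
+ # the rotary base for contexts beyond the trained length.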
664
+ rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
665
+ for idx in range(len(rotary_pos_emb)):
666
+ rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device)
667
+
668
+ hidden_states = self.drop(hidden_states).clone()
669
+ if fake_images is not None:
670
+ hidden_states = hidden_states + images.mean() * 0  # dummy term keeps the visual tower in the autograd graph when no real image is present
671
+ elif images is not None:
672
+ for idx, (i, a, b) in enumerate(img_pos):
673
+ hidden_states[i][a + 1 : b] = images[idx]  # splice the visual embeddings between the <img> and </img> markers
674
+ output_shape = input_shape + (hidden_states.size(-1),)
675
+
676
+ if self.gradient_checkpointing and self.training:
677
+ if use_cache:
678
+ logger.warning_once(
679
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
680
+ )
681
+ use_cache = False
682
+
683
+ presents = () if use_cache else None
684
+ all_self_attentions = () if output_attentions else None
685
+ all_hidden_states = () if output_hidden_states else None
686
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
687
+
688
+ if output_hidden_states:
689
+ all_hidden_states = all_hidden_states + (hidden_states,)
690
+
691
+ if self.gradient_checkpointing and self.training:
692
+
693
+ def create_custom_forward(module):
694
+ def custom_forward(*inputs):
695
+ # None for past_key_value
696
+ return module(*inputs, use_cache, output_attentions)
697
+
698
+ return custom_forward
699
+
700
+ outputs = torch.utils.checkpoint.checkpoint(
701
+ create_custom_forward(block),
702
+ hidden_states,
703
+ rotary_pos_emb,
704
+ self.registered_causal_mask,
705
+ None,
706
+ attention_mask,
707
+ head_mask[i],
708
+ encoder_hidden_states,
709
+ encoder_attention_mask,
710
+ )
711
+ else:
712
+ outputs = block(
713
+ hidden_states,
714
+ layer_past=layer_past,
715
+ rotary_pos_emb=rotary_pos_emb,
716
+ registered_causal_mask=self.registered_causal_mask,
717
+ attention_mask=attention_mask,
718
+ head_mask=head_mask[i],
719
+ encoder_hidden_states=encoder_hidden_states,
720
+ encoder_attention_mask=encoder_attention_mask,
721
+ use_cache=use_cache,
722
+ output_attentions=output_attentions,
723
+ )
724
+
725
+ hidden_states = outputs[0]
726
+ if use_cache is True:
727
+ presents = presents + (outputs[1],)
728
+
729
+ if output_attentions:
730
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
731
+
732
+ hidden_states = self.ln_f(hidden_states)
733
+ hidden_states = hidden_states.view(output_shape)
734
+ # Add last hidden state
735
+ if output_hidden_states:
736
+ all_hidden_states = all_hidden_states + (hidden_states,)
737
+
738
+ if not return_dict:
739
+ return tuple(
740
+ v for v in [hidden_states, presents, all_hidden_states] if v is not None
741
+ )
742
+
743
+ return BaseModelOutputWithPast(
744
+ last_hidden_state=hidden_states,
745
+ past_key_values=presents,
746
+ hidden_states=all_hidden_states,
747
+ attentions=all_self_attentions,
748
+ )
749
+
750
+
751
+ class QWenLMHeadModel(QWenPreTrainedModel):
752
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
753
+ _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
754
+
755
+ def __init__(self, config):
756
+ super().__init__(config)
757
+ assert (
758
+ config.bf16 + config.fp16 + config.fp32 <= 1
759
+ ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
760
+
761
+ autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
762
+
763
+ if autoset_precision:
764
+ if SUPPORT_BF16:
765
+ logger.warn(
766
+ "The model is being automatically converted to bf16 for faster inference. "
767
+ "To disable this automatic precision selection, explicitly pass bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
768
+ )
769
+ config.bf16 = True
770
+ elif SUPPORT_FP16:
771
+ logger.warn(
772
+ "The model is being automatically converted to fp16 for faster inference. "
773
+ "To disable this automatic precision selection, explicitly pass bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
774
+ )
775
+ config.fp16 = True
776
+ else:
777
+ config.fp32 = True
778
+
779
+ if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
780
+ logger.warn("Your device does NOT seem to support bf16; you can switch to fp16 or fp32 by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
781
+ if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
782
+ logger.warn("Your device does NOT support faster inference with fp16; please switch to fp32, which is likely to be faster.")
783
+ if config.fp32:
784
+ if SUPPORT_BF16:
785
+ logger.warn("Your device supports faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
786
+ elif SUPPORT_FP16:
787
+ logger.warn("Your device supports faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
788
+
789
+ self.transformer = QWenModel(config)
790
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
791
+
792
+ if config.bf16:
793
+ self.transformer.bfloat16()
794
+ self.lm_head.bfloat16()
795
+ if config.fp16:
796
+ self.transformer.half()
797
+ self.lm_head.half()
798
+ self.post_init()
799
+
800
+ def get_output_embeddings(self):
801
+ return self.lm_head
802
+
803
+ def set_output_embeddings(self, new_embeddings):
804
+ self.lm_head = new_embeddings
805
+
806
+ def prepare_inputs_for_generation(
807
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
808
+ ):
809
+ token_type_ids = kwargs.get("token_type_ids", None)
810
+ if past_key_values:
811
+ input_ids = input_ids[:, -1].unsqueeze(-1)
812
+ if token_type_ids is not None:
813
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
814
+
815
+ attention_mask = kwargs.get("attention_mask", None)
816
+ position_ids = kwargs.get("position_ids", None)
817
+
818
+ if attention_mask is not None and position_ids is None:
819
+ position_ids = attention_mask.long().cumsum(-1) - 1
820
+ position_ids.masked_fill_(attention_mask == 0, 1)
821
+ if past_key_values:
822
+ position_ids = position_ids[:, -1].unsqueeze(-1)
823
+ else:
824
+ position_ids = None
825
+
826
+ if inputs_embeds is not None and past_key_values is None:
827
+ model_inputs = {"inputs_embeds": inputs_embeds}
828
+ else:
829
+ model_inputs = {"input_ids": input_ids}
830
+
831
+ model_inputs.update(
832
+ {
833
+ "past_key_values": past_key_values,
834
+ "use_cache": kwargs.get("use_cache"),
835
+ "position_ids": position_ids,
836
+ "attention_mask": attention_mask,
837
+ "token_type_ids": token_type_ids,
838
+ }
839
+ )
840
+ return model_inputs
841
+
842
+ def forward(
843
+ self,
844
+ input_ids: Optional[torch.LongTensor] = None,
845
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
846
+ attention_mask: Optional[torch.FloatTensor] = None,
847
+ token_type_ids: Optional[torch.LongTensor] = None,
848
+ position_ids: Optional[torch.LongTensor] = None,
849
+ head_mask: Optional[torch.FloatTensor] = None,
850
+ inputs_embeds: Optional[torch.FloatTensor] = None,
851
+ encoder_hidden_states: Optional[torch.Tensor] = None,
852
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
853
+ labels: Optional[torch.LongTensor] = None,
854
+ use_cache: Optional[bool] = None,
855
+ output_attentions: Optional[bool] = None,
856
+ output_hidden_states: Optional[bool] = None,
857
+ return_dict: Optional[bool] = None,
858
+ masks_ids: Optional[torch.LongTensor] = None,
859
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
860
+
861
+ return_dict = (
862
+ return_dict if return_dict is not None else self.config.use_return_dict
863
+ )
864
+
865
+ transformer_outputs = self.transformer(
866
+ input_ids,
867
+ past_key_values=past_key_values,
868
+ attention_mask=attention_mask,
869
+ token_type_ids=token_type_ids,
870
+ position_ids=position_ids,
871
+ head_mask=head_mask,
872
+ inputs_embeds=inputs_embeds,
873
+ encoder_hidden_states=encoder_hidden_states,
874
+ encoder_attention_mask=encoder_attention_mask,
875
+ use_cache=use_cache,
876
+ output_attentions=output_attentions,
877
+ output_hidden_states=output_hidden_states,
878
+ return_dict=return_dict,
879
+ masks_ids=masks_ids,
880
+ )
881
+ hidden_states = transformer_outputs[0]
882
+
883
+ lm_logits = self.lm_head(hidden_states)
884
+
885
+ loss = None
886
+ if labels is not None:
887
+ labels = labels.to(lm_logits.device)
888
+ shift_logits = lm_logits[..., :-1, :].contiguous()
889
+ shift_labels = labels[..., 1:].contiguous()
890
+ loss_fct = CrossEntropyLoss()
891
+ loss = loss_fct(
892
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
893
+ )
894
+
895
+ if not return_dict:
896
+ output = (lm_logits,) + transformer_outputs[1:]
897
+ return ((loss,) + output) if loss is not None else output
898
+
899
+ return CausalLMOutputWithPast(
900
+ loss=loss,
901
+ logits=lm_logits,
902
+ past_key_values=transformer_outputs.past_key_values,
903
+ hidden_states=transformer_outputs.hidden_states,
904
+ attentions=transformer_outputs.attentions,
905
+ )
906
+
907
+ @staticmethod
908
+ def _reorder_cache(
909
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
910
+ ) -> Tuple[Tuple[torch.Tensor]]:
911
+
912
+ return tuple(
913
+ tuple(
914
+ past_state.index_select(0, beam_idx.to(past_state.device))
915
+ for past_state in layer_past
916
+ )
917
+ for layer_past in past_key_values
918
+ )
919
+
920
+ def chat(
921
+ self,
922
+ tokenizer: PreTrainedTokenizer,
923
+ query: str,
924
+ history: Optional[HistoryType],
925
+ system: str = "You are a helpful assistant.",
926
+ append_history: bool = True,
927
+ stream: Optional[bool] = _SENTINEL,
928
+ stop_words_ids: Optional[List[List[int]]] = None,
929
+ generation_config: Optional[GenerationConfig] = None,
930
+ **kwargs,
931
+ ) -> Tuple[str, HistoryType]:
932
+ generation_config = generation_config if generation_config is not None else self.generation_config
933
+
934
+ assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
935
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
936
+ if history is None:
937
+ history = []
938
+ if stop_words_ids is None:
939
+ stop_words_ids = []
940
+
941
+ max_window_size = kwargs.get('max_window_size', None)
942
+ if max_window_size is None:
943
+ max_window_size = generation_config.max_window_size
944
+ raw_text, context_tokens = make_context(
945
+ tokenizer,
946
+ query,
947
+ history=history,
948
+ system=system,
949
+ max_window_size=max_window_size,
950
+ chat_format=generation_config.chat_format,
951
+ )
952
+
953
+ stop_words_ids.extend(get_stop_words_ids(
954
+ generation_config.chat_format, tokenizer
955
+ ))
956
+ input_ids = torch.tensor([context_tokens]).to(self.device)
957
+ outputs = self.generate(
958
+ input_ids,
959
+ stop_words_ids=stop_words_ids,
960
+ return_dict_in_generate=False,
961
+ generation_config=generation_config,
962
+ **kwargs,
963
+ )
964
+
965
+ response = decode_tokens(
966
+ outputs[0],
967
+ tokenizer,
968
+ raw_text_len=len(raw_text),
969
+ context_length=len(context_tokens),
970
+ chat_format=generation_config.chat_format,
971
+ verbose=False,
972
+ errors='replace'
973
+ )
974
+
975
+ if append_history:
976
+ history.append((query, response))
977
+
978
+ return response, history
979
+
980
+ def chat_stream(
981
+ self,
982
+ tokenizer: PreTrainedTokenizer,
983
+ query: str,
984
+ history: Optional[HistoryType],
985
+ system: str = "You are a helpful assistant.",
986
+ stop_words_ids: Optional[List[List[int]]] = None,
987
+ logits_processor: Optional[LogitsProcessorList] = None,
988
+ generation_config: Optional[GenerationConfig] = None,
989
+ **kwargs,
990
+ ) -> Generator[str, Any, None]:
991
+ generation_config = generation_config if generation_config is not None else self.generation_config
992
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
993
+ if history is None:
994
+ history = []
995
+ if stop_words_ids is None:
996
+ stop_words_ids = []
997
+
998
+ max_window_size = kwargs.get('max_window_size', None)
999
+ if max_window_size is None:
1000
+ max_window_size = generation_config.max_window_size
1001
+ raw_text, context_tokens = make_context(
1002
+ tokenizer,
1003
+ query,
1004
+ history=history,
1005
+ system=system,
1006
+ max_window_size=max_window_size,
1007
+ chat_format=generation_config.chat_format,
1008
+ )
1009
+
1010
+ stop_words_ids.extend(get_stop_words_ids(
1011
+ generation_config.chat_format, tokenizer
1012
+ ))
1013
+ if stop_words_ids is not None:
1014
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1015
+ stop_words_ids=stop_words_ids,
1016
+ eos_token_id=generation_config.eos_token_id,
1017
+ )
1018
+ if logits_processor is None:
1019
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1020
+ else:
1021
+ logits_processor.append(stop_words_logits_processor)
1022
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1023
+
1024
+ from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
1025
+ self.__class__.generate_stream = NewGenerationMixin.generate
1026
+ self.__class__.sample_stream = NewGenerationMixin.sample_stream
1027
+ stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
1028
+
1029
+ def stream_generator():
1030
+ outputs = []
1031
+ for token in self.generate_stream(
1032
+ input_ids,
1033
+ return_dict_in_generate=False,
1034
+ generation_config=stream_config,
1035
+ logits_processor=logits_processor,
1036
+ seed=-1,
1037
+ **kwargs):
1038
+ outputs.append(token.item())
1039
+ yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore', keep_image_special=True)
1040
+
1041
+ return stream_generator()
1042
+
1043
+ def generate(
1044
+ self,
1045
+ inputs: Optional[torch.Tensor] = None,
1046
+ generation_config: Optional[GenerationConfig] = None,
1047
+ logits_processor: Optional[LogitsProcessorList] = None,
1048
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1049
+ prefix_allowed_tokens_fn: Optional[
1050
+ Callable[[int, torch.Tensor], List[int]]
1051
+ ] = None,
1052
+ synced_gpus: Optional[bool] = None,
1053
+ assistant_model: Optional["PreTrainedModel"] = None,
1054
+ streamer: Optional["BaseStreamer"] = None,
1055
+ **kwargs,
1056
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1057
+ generation_config = generation_config if generation_config is not None else self.generation_config
1058
+
1059
+ # Process stop_words_ids.
1060
+ stop_words_ids = kwargs.pop("stop_words_ids", None)
1061
+ if stop_words_ids is None and generation_config is not None:
1062
+ stop_words_ids = getattr(generation_config, "stop_words_ids", None)
1065
+
1066
+ if stop_words_ids is not None:
1067
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1068
+ stop_words_ids=stop_words_ids,
1069
+ eos_token_id=generation_config.eos_token_id,
1070
+ )
1071
+ if logits_processor is None:
1072
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1073
+ else:
1074
+ logits_processor.append(stop_words_logits_processor)
1075
+
1076
+ return super().generate(
1077
+ inputs,
1078
+ generation_config=generation_config,
1079
+ logits_processor=logits_processor,
1080
+ stopping_criteria=stopping_criteria,
1081
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1082
+ synced_gpus=synced_gpus,
1083
+ assistant_model=assistant_model,
1084
+ streamer=streamer,
1085
+ **kwargs,
1086
+ )
1087
+
1088
+
1089
+ class RotaryEmbedding(torch.nn.Module):
1090
+ def __init__(self, dim, base=10000):
1091
+ super().__init__()
1092
+ self.dim = dim
1093
+ self.base = base
1094
+ self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
1095
+ if importlib.util.find_spec("einops") is None:
1096
+ raise RuntimeError("einops is required for Rotary Embedding")
1097
+
1098
+ self._rotary_pos_emb_cache = None
1099
+ self._seq_len_cached = 0
1100
+ self._ntk_alpha_cached = 1.0
1101
+
1102
+ def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
1103
+ seqlen = max_seq_len + offset
1104
+ if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
1105
+ base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
1106
+ self.inv_freq = 1.0 / (
1107
+ base
1108
+ ** (
1109
+ torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
1110
+ / self.dim
1111
+ )
1112
+ )
1113
+ self._seq_len_cached = max(2 * seqlen, 16)
1114
+ self._ntk_alpha_cached = ntk_alpha
1115
+ seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
1116
+ freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
1117
+
1118
+ emb = torch.cat((freqs, freqs), dim=-1)
1119
+ from einops import rearrange
1120
+
1121
+ emb = rearrange(emb, "n d -> 1 n 1 d")
1122
+
1123
+ cos, sin = emb.cos(), emb.sin()
1124
+ self._rotary_pos_emb_cache = [cos, sin]
1125
+
1126
+ def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
1127
+ self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
1128
+ cos, sin = self._rotary_pos_emb_cache
1129
+ return [cos[:, offset : offset + max_seq_len], sin[:, offset : offset + max_seq_len]]
1130
+
1131
+
1132
+ def _rotate_half(x):
1133
+ from einops import rearrange
1134
+
1135
+ x = rearrange(x, "... (j d) -> ... j d", j=2)
1136
+ x1, x2 = x.unbind(dim=-2)
1137
+ return torch.cat((-x2, x1), dim=-1)
1138
+
1139
+
1140
+ def apply_rotary_pos_emb(t, freqs):
1141
+ cos, sin = freqs
1142
+ if apply_rotary_emb_func is not None and t.is_cuda:
1143
+ t_ = t.float()
1144
+ cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]
1145
+ sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]
1146
+ output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
1147
+ return output
1148
+ else:
1149
+ rot_dim = freqs[0].shape[-1]
1150
+ cos, sin = freqs
1151
+ t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
1152
+ t_ = t_.float()
1153
+ t_pass_ = t_pass_.float()
1154
+ t_ = (t_ * cos) + (_rotate_half(t_) * sin)
1155
+ return torch.cat((t_, t_pass_), dim=-1).type_as(t)
1156
+
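+ # Recap of the rotation applied above: for each pair (x1, x2) at position m
+ # with angle m*theta, the result is (x1*cos - x2*sin, x2*cos + x1*sin);
+ # _rotate_half supplies the (-x2, x1) term.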
1157
+
1158
+ class RMSNorm(torch.nn.Module):
1159
+ def __init__(self, dim: int, eps: float = 1e-6):
1160
+ super().__init__()
1161
+ self.eps = eps
1162
+ self.weight = nn.Parameter(torch.ones(dim))
1163
+
1164
+ def _norm(self, x):
1165
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
1166
+
1167
+ def forward(self, x):
1168
+ if rms_norm is not None and x.is_cuda:
1169
+ return rms_norm(x, self.weight, self.eps)
1170
+ else:
1171
+ output = self._norm(x.float()).type_as(x)
1172
+ return output * self.weight
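
Taken together, `QWenLMHeadModel.chat` above is the intended entry point for this model. Below is a minimal usage sketch, not an official example: the image path and query text are placeholders, and the `bf16=True` / `device_map="auto"` kwargs assume a CUDA device with bf16 support.

```python
# Minimal sketch: download the weights and run one multimodal chat turn
# through the remote-code files uploaded in this commit.
from modelscope import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = snapshot_download('zzzmmz/SegAgent-Model')
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_dir, trust_remote_code=True, bf16=True, device_map="auto"
).eval()

# Qwen-VL-style prompt: an image reference plus a text query.
# 'demo.jpg' is a placeholder path, not a file shipped with this repo.
query = tokenizer.from_list_format([
    {'image': 'demo.jpg'},
    {'text': 'Please segment the dog in the image.'},
])
response, history = model.chat(tokenizer, query, history=None)
print(response)
```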
segagent/zzzmmz/SegAgent-Model/qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
segagent/zzzmmz/SegAgent-Model/qwen_generation_utils.py ADDED
@@ -0,0 +1,420 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Generation support."""
7
+
8
+ from typing import Tuple, List, Union, Iterable
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from transformers import PreTrainedTokenizer
14
+ from transformers import logging
15
+ from transformers.generation import LogitsProcessor
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+ # Types.
20
+ HistoryType = List[Tuple[str, str]]
21
+ TokensType = List[int]
22
+ BatchTokensType = List[List[int]]
23
+
24
+
25
+ def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
26
+ for tokens in batch:
27
+ context_length = len(tokens)
28
+ if context_length < seq_length:
29
+ tokens.extend([pad_id] * (seq_length - context_length))
30
+ return batch
31
+
32
+
33
+ def get_ltor_masks_and_position_ids(
34
+ data,
35
+ eod_token,
36
+ reset_position_ids,
37
+ reset_attention_mask,
38
+ eod_mask_loss,
39
+ ):
40
+ """Build masks and position id for left to right model."""
41
+
42
+ # Extract batch size and sequence length.
43
+ micro_batch_size, seq_length = data.size()
44
+
45
+ # Attention mask (lower triangular).
46
+ if reset_attention_mask:
47
+ att_mask_batch = micro_batch_size
48
+ else:
49
+ att_mask_batch = 1
50
+ attention_mask = torch.tril(
51
+ torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
52
+ ).view(att_mask_batch, 1, seq_length, seq_length)
53
+
54
+ # Loss mask.
55
+ loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
56
+ if eod_mask_loss:
57
+ loss_mask[data == eod_token] = 0.0
58
+
59
+ # Position ids.
60
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
61
+ position_ids = position_ids.unsqueeze(0).expand_as(data)
62
+ # We need to clone, as the ids will be modified based on batch index.
63
+ if reset_position_ids:
64
+ position_ids = position_ids.clone()
65
+
66
+ if reset_position_ids or reset_attention_mask:
67
+ # Loop through the batches:
68
+ for b in range(micro_batch_size):
69
+
70
+ # Find indices where the EOD token is.
71
+ eod_index = position_ids[b, data[b] == eod_token]
72
+ # Detach indices from positions if we are going to modify positions.
73
+ if reset_position_ids:
74
+ eod_index = eod_index.clone()
75
+
76
+ # Loop through EOD indices:
77
+ prev_index = 0
78
+ for j in range(eod_index.size()[0]):
79
+ i = eod_index[j]
80
+ # Mask attention loss.
81
+ if reset_attention_mask:
82
+ attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
83
+ # Reset positions.
84
+ if reset_position_ids:
85
+ position_ids[b, (i + 1) :] -= i + 1 - prev_index
86
+ prev_index = i + 1
87
+
88
+ # Convert attention mask to binary:
89
+ attention_mask = attention_mask < 0.5
90
+
91
+ return attention_mask, loss_mask, position_ids
92
+
93
+
94
+ def get_batch(context_tokens: torch.LongTensor, eod_id: int):
95
+ """Generate batch from context tokens."""
96
+ # Move to GPU.
97
+ tokens = context_tokens.contiguous().to(context_tokens.device)
98
+ # Get the attention mask and position ids.
99
+ attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
100
+ tokens,
101
+ eod_id,
102
+ reset_position_ids=False,
103
+ reset_attention_mask=False,
104
+ eod_mask_loss=False,
105
+ )
106
+ return tokens, attention_mask, position_ids
107
+
108
+
109
+ def get_stop_words_ids(chat_format, tokenizer):
110
+ if chat_format == "raw":
111
+ stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
112
+ elif chat_format == "chatml":
113
+ stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
114
+ else:
115
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
116
+ return stop_words_ids
117
+
118
+
119
+ def make_context(
120
+ tokenizer: PreTrainedTokenizer,
121
+ query: str,
122
+ history: List[Tuple[str, str]] = None,
123
+ system: str = "",
124
+ max_window_size: int = 6144,
125
+ chat_format: str = "chatml",
126
+ ):
127
+ if history is None:
128
+ history = []
129
+
130
+ if chat_format == "chatml":
131
+ im_start, im_end = "<|im_start|>", "<|im_end|>"
132
+ im_start_tokens = [tokenizer.im_start_id]
133
+ im_end_tokens = [tokenizer.im_end_id]
134
+ nl_tokens = tokenizer.encode("\n")
135
+
136
+ def _tokenize_str(role, content):
137
+ return f"{role}\n{content}", tokenizer.encode(
138
+ role, allowed_special=set(tokenizer.IMAGE_ST)
139
+ ) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))
140
+
141
+ system_text, system_tokens_part = _tokenize_str("system", system)
142
+ system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
143
+
144
+ raw_text = ""
145
+ context_tokens = []
146
+
147
+ for turn_query, turn_response in reversed(history):
148
+ query_text, query_tokens_part = _tokenize_str("user", turn_query)
149
+ query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
150
+ if turn_response is not None:
151
+ response_text, response_tokens_part = _tokenize_str(
152
+ "assistant", turn_response
153
+ )
154
+ response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
155
+
156
+ next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
157
+ prev_chat = (
158
+ f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
159
+ )
160
+ else:
161
+ next_context_tokens = nl_tokens + query_tokens + nl_tokens
162
+ prev_chat = f"\n{im_start}{query_text}{im_end}\n"
163
+
164
+ current_context_size = (
165
+ len(system_tokens) + len(next_context_tokens) + len(context_tokens)
166
+ )
167
+ if current_context_size < max_window_size:
168
+ context_tokens = next_context_tokens + context_tokens
169
+ raw_text = prev_chat + raw_text
170
+ else:
171
+ break
172
+
173
+ context_tokens = system_tokens + context_tokens
174
+ raw_text = f"{im_start}{system_text}{im_end}" + raw_text
175
+ context_tokens += (
176
+ nl_tokens
177
+ + im_start_tokens
178
+ + _tokenize_str("user", query)[1]
179
+ + im_end_tokens
180
+ + nl_tokens
181
+ + im_start_tokens
182
+ + tokenizer.encode("assistant")
183
+ + nl_tokens
184
+ )
185
+ raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
186
+
187
+ elif chat_format == "raw":
188
+ raw_text = query
189
+ context_tokens = tokenizer.encode(raw_text)
190
+ else:
191
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
192
+
193
+ return raw_text, context_tokens
194
+
195
+
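+ # For orientation (schematic; chat() passes "You are a helpful assistant."
+ # as the system prompt), a single-turn call to make_context produces a
+ # ChatML string of the form:
+ #     <|im_start|>system
+ #     You are a helpful assistant.<|im_end|>
+ #     <|im_start|>user
+ #     {query}<|im_end|>
+ #     <|im_start|>assistant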
196
+ def _decode_default(
197
+ tokens: List[int],
198
+ *,
199
+ stop_words: List[str],
200
+ eod_words: List[str],
201
+ tokenizer: PreTrainedTokenizer,
202
+ raw_text_len: int,
203
+ verbose: bool = False,
204
+ return_end_reason: bool = False,
205
+ errors: str='replace',
206
+ ):
207
+ trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
208
+ if verbose:
209
+ print("\nRaw Generate: ", trim_decode_tokens)
210
+
211
+ end_reason = f"Gen length {len(tokens)}"
212
+ for stop_word in stop_words:
213
+ trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
214
+ for eod_word in eod_words:
215
+ if eod_word in trim_decode_tokens:
216
+ end_reason = f"Gen {eod_word!r}"
217
+ trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
218
+ trim_decode_tokens = trim_decode_tokens.strip()
219
+ if verbose:
220
+ print("\nEnd Reason:", end_reason)
221
+ print("\nGenerate: ", trim_decode_tokens)
222
+
223
+ if return_end_reason:
224
+ return trim_decode_tokens, end_reason
225
+ else:
226
+ return trim_decode_tokens
227
+
228
+
229
+ def _decode_chatml(
230
+ tokens: List[int],
231
+ *,
232
+ stop_words: List[str],
233
+ eod_token_ids: List[int],
234
+ tokenizer: PreTrainedTokenizer,
235
+ raw_text_len: int,
236
+ context_length: int,
237
+ verbose: bool = False,
238
+ return_end_reason: bool = False,
239
+ errors: str='replace'
240
+ ):
241
+ end_reason = f"Gen length {len(tokens)}"
242
+ eod_token_idx = context_length
243
+ for eod_token_idx in range(context_length, len(tokens)):
244
+ if tokens[eod_token_idx] in eod_token_ids:
245
+ end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
246
+ break
247
+
248
+ trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
249
+ if verbose:
250
+ print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
251
+ print("\nRaw Generate:", trim_decode_tokens)
252
+ print("\nEnd Reason:", end_reason)
253
+ for stop_word in stop_words:
254
+ trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
255
+ trim_decode_tokens = trim_decode_tokens.strip()
256
+ if verbose:
257
+ print("\nGenerate:", trim_decode_tokens)
258
+
259
+ if return_end_reason:
260
+ return trim_decode_tokens, end_reason
261
+ else:
262
+ return trim_decode_tokens
263
+
264
+
265
+ def decode_tokens(
266
+ tokens: Union[torch.LongTensor, TokensType],
267
+ tokenizer: PreTrainedTokenizer,
268
+ raw_text_len: int,
269
+ context_length: int,
270
+ chat_format: str,
271
+ verbose: bool = False,
272
+ return_end_reason: bool = False,
273
+ errors: str="replace",
274
+ ) -> str:
275
+ if torch.is_tensor(tokens):
276
+ tokens = tokens.cpu().numpy().tolist()
277
+
278
+ if chat_format == "chatml":
279
+ return _decode_chatml(
280
+ tokens,
281
+ stop_words=[],
282
+ eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
283
+ tokenizer=tokenizer,
284
+ raw_text_len=raw_text_len,
285
+ context_length=context_length,
286
+ verbose=verbose,
287
+ return_end_reason=return_end_reason,
288
+ errors=errors,
289
+ )
290
+ elif chat_format == "raw":
291
+ return _decode_default(
292
+ tokens,
293
+ stop_words=["<|endoftext|>"],
294
+ eod_words=["<|endoftext|>"],
295
+ tokenizer=tokenizer,
296
+ raw_text_len=raw_text_len,
297
+ verbose=verbose,
298
+ return_end_reason=return_end_reason,
299
+ errors=errors,
300
+ )
301
+ else:
302
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
303
+
304
+
305
+ class StopWordsLogitsProcessor(LogitsProcessor):
306
+ """
307
+ :class:`transformers.LogitsProcessor` that forces generation to stop once any of the specified stop sequences appears.
308
+
309
+ Args:
310
+ stop_words_ids (:obj:`List[List[int]]`):
311
+ List of list of token ids of stop ids. In order to get the tokens of the words
312
+ that should not appear in the generated text, use :obj:`tokenizer(bad_word,
313
+ add_prefix_space=True).input_ids`.
314
+ eos_token_id (:obj:`int`):
315
+ The id of the `end-of-sequence` token.
316
+ """
317
+
318
+ def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
319
+
320
+ if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
321
+ raise ValueError(
322
+ f"`stop_words_ids` has to be a non-empty list, but is {stop_words_ids}."
323
+ )
324
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
325
+ raise ValueError(
326
+ f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
327
+ )
328
+ if any(
329
+ any(
330
+ (not isinstance(token_id, (int, np.integer)) or token_id < 0)
331
+ for token_id in stop_word_ids
332
+ )
333
+ for stop_word_ids in stop_words_ids
334
+ ):
335
+ raise ValueError(
336
+ f"Each list in `stop_words_ids` has to be a list of non-negative integers, but is {stop_words_ids}."
337
+ )
338
+
339
+ self.stop_words_ids = list(
340
+ filter(
341
+ lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
342
+ )
343
+ )
344
+ self.eos_token_id = eos_token_id
345
+ for stop_token_seq in self.stop_words_ids:
346
+ assert (
347
+ len(stop_token_seq) > 0
348
+ ), "Stop words token sequences {} cannot have an empty list".format(
349
+ stop_words_ids
350
+ )
351
+
352
+ def __call__(
353
+ self, input_ids: torch.LongTensor, scores: torch.FloatTensor
354
+ ) -> torch.FloatTensor:
355
+ stopped_samples = self._calc_stopped_samples(input_ids)
356
+ for i, should_stop in enumerate(stopped_samples):
357
+ if should_stop:
358
+ scores[i, self.eos_token_id] = float(2**15)
359
+ return scores
360
+
361
+ def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
362
+ if len(tokens) == 0:
363
+ # an empty stop sequence matches trivially
364
+ return True
365
+ elif len(tokens) > len(prev_tokens):
366
+ # if the stop sequence is longer than prev input_ids, they can't match
367
+ return False
368
+ elif prev_tokens[-len(tokens) :].tolist() == tokens:
369
+ # if tokens match
370
+ return True
371
+ else:
372
+ return False
373
+
374
+ def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
375
+ stopped_samples = []
376
+ for prev_input_ids_slice in prev_input_ids:
377
+ match = False
378
+ for stop_token_seq in self.stop_words_ids:
379
+ if self._tokens_match(prev_input_ids_slice, stop_token_seq):
380
+ # the latest generated tokens match this stop sequence
381
+ match = True
382
+ break
383
+ stopped_samples.append(match)
384
+
385
+ return stopped_samples
386
+
387
+
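+ # Usage sketch (the token ids below are hypothetical): once a stop sequence
+ # has just been generated, the processor forces the EOS logit high so
+ # generation ends.
+ # processor = StopWordsLogitsProcessor(stop_words_ids=[[9707, 25]], eos_token_id=151643)
+ # scores = processor(input_ids, scores)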
388
+ def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
389
+ """This function has been mostly taken from huggingface conversational
390
+ ai code at
391
+ https://medium.com/huggingface/how-to-build-a-state-of-the-art-
392
+ conversational-ai-with-transfer-learning-2d818ac26313"""
393
+
394
+ if top_k > 0:
395
+ # Remove all tokens with a probability less than the
396
+ # last token of the top-k
397
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
398
+ logits[indices_to_remove] = filter_value
399
+
400
+ if top_p > 0.0:
401
+ # Sort the logits in descending order
402
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
403
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
404
+
405
+ # Remove tokens with cumulative probability above the threshold
406
+ sorted_indices_to_remove = cumulative_probs > top_p
407
+ # Shift the indices to the right to keep also the first token
408
+ # above the threshold
409
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
410
+ sorted_indices_to_remove[..., 0] = 0
411
+ for i in range(sorted_indices.size(0)):
412
+ indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
413
+ logits[i][indices_to_remove] = filter_value
414
+
415
+ return logits
416
+
417
+
418
+ def switch(val1, val2, boolean):
419
+ boolean = boolean.type_as(val1)
420
+ return (1 - boolean) * val1 + boolean * val2
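
A quick, hedged illustration of `top_k_logits` (the logit values are made up): top-k filtering keeps the k largest logits and sets the rest to `-inf`, so sampling can only pick from the surviving candidates.

```python
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
filtered = top_k_logits(logits.clone(), top_k=2)
# filtered == tensor([[2., 1., -inf, -inf]])
probs = F.softmax(filtered, dim=-1)   # -inf logits get probability 0
next_token = torch.multinomial(probs, num_samples=1)
```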
segagent/zzzmmz/SegAgent-Model/special_tokens_map.json ADDED
@@ -0,0 +1,3 @@
1
+ {
2
+ "pad_token": "<|endoftext|>"
3
+ }
segagent/zzzmmz/SegAgent-Model/tokenization_qwen.py ADDED
@@ -0,0 +1,598 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Tokenization classes for QWen."""
7
+
8
+ import base64
9
+ import logging
10
+ import os
11
+ import requests
12
+ import unicodedata
13
+ from typing import Collection, Dict, List, Set, Tuple, Union, Any, Callable, Optional
14
+
15
+ import tiktoken
16
+ import numpy as np
17
+ from PIL import Image
18
+ from PIL import ImageFont
19
+ from PIL import ImageDraw
20
+ from transformers import PreTrainedTokenizer, AddedToken
21
+ from transformers.utils import try_to_load_from_cache
22
+
23
+ import matplotlib.colors as mcolors
24
+ from matplotlib.font_manager import FontProperties
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken", "ttf": "SimSun.ttf"}
30
+ FONT_PATH = try_to_load_from_cache("Qwen/Qwen-VL-Chat", "SimSun.ttf")
31
+ if FONT_PATH is None:
32
+ if not os.path.exists("SimSun.ttf"):
33
+ ttf = requests.get("https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/SimSun.ttf")
34
+ with open("SimSun.ttf", "wb") as f:
+ f.write(ttf.content)
35
+ FONT_PATH = "SimSun.ttf"
36
+
37
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
38
+ ENDOFTEXT = "<|endoftext|>"
39
+ IMSTART = "<|im_start|>"
40
+ IMEND = "<|im_end|>"
41
+ # since the default behavior has been changed to allow special tokens in
42
+ # regular text, the surface forms of special tokens need to be as
43
+ # distinctive as possible to minimize the impact of collisions
44
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
45
+ SPECIAL_TOKENS = (
46
+ ENDOFTEXT,
47
+ IMSTART,
48
+ IMEND,
49
+ ) + EXTRAS
50
+ IMG_TOKEN_SPAN = 256
51
+
52
+
53
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
54
+ with open(tiktoken_bpe_file, "rb") as f:
55
+ contents = f.read()
56
+ return {
57
+ base64.b64decode(token): int(rank)
58
+ for token, rank in (line.split() for line in contents.splitlines() if line)
59
+ }
60
+
61
+ def _list_find(
62
+ input_list: List[Any],
63
+ candidates: Tuple[Any],
64
+ start: int = 0,
65
+ ):
66
+ for i in range(start, len(input_list)):
67
+ if input_list[i] in candidates:
68
+ return i
69
+ return -1
70
+
71
+ def _replace_closed_tag(
72
+ input_tokens: List[Any],
73
+ start_tags: Union[Any, Tuple[Any]],
74
+ end_tags: Union[Any, Tuple[Any]],
75
+ inclusive_replace_func: Callable,
76
+ exclusive_replace_func: Callable = lambda x: x,
77
+ ):
78
+ if isinstance(start_tags, (str, int)):
79
+ start_tags = (start_tags,)
80
+ if isinstance(end_tags, (str, int)):
81
+ end_tags = (end_tags,)
82
+ assert len(start_tags) == len(end_tags)
83
+
84
+ output_tokens = []
85
+ end = 0
86
+ while True:
87
+ start = _list_find(input_tokens, start_tags, end)
88
+ if start == -1:
89
+ break
90
+ output_tokens.extend(exclusive_replace_func(input_tokens[end : start]))
91
+ tag_idx = start_tags.index(input_tokens[start])
92
+ end = _list_find(input_tokens, (end_tags[tag_idx],), start)
93
+ if end == -1:
94
+ raise ValueError("Unclosed image token")
95
+ output_tokens.extend(inclusive_replace_func(input_tokens[start : end + 1]))
96
+ end += 1
97
+ output_tokens.extend(exclusive_replace_func(input_tokens[end : ]))
98
+ return output_tokens
99
+
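+ # Illustration (schematic): with start/end tags "<img>"/"</img>",
+ # _replace_closed_tag(["a", "<img>", "x", "</img>", "b"], "<img>", "</img>", f)
+ # applies f to the inclusive span ["<img>", "x", "</img>"] and
+ # exclusive_replace_func to the surrounding spans ["a"] and ["b"].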
100
+ class QWenTokenizer(PreTrainedTokenizer):
101
+ """QWen tokenizer."""
102
+
103
+ vocab_files_names = VOCAB_FILES_NAMES
104
+
105
+ def __init__(
106
+ self,
107
+ vocab_file,
108
+ errors="replace",
109
+ image_start_tag='<img>',
110
+ image_end_tag='</img>',
111
+ image_pad_tag='<imgpad>',
112
+ ref_start_tag='<ref>',
113
+ ref_end_tag='</ref>',
114
+ box_start_tag='<box>',
115
+ box_end_tag='</box>',
116
+ quad_start_tag='<quad>',
117
+ quad_end_tag='</quad>',
118
+ **kwargs,
119
+ ):
120
+ super().__init__(**kwargs)
121
+ self.image_start_tag = image_start_tag
122
+ self.image_end_tag = image_end_tag
123
+ self.image_pad_tag = image_pad_tag
124
+ self.ref_start_tag = ref_start_tag
125
+ self.ref_end_tag = ref_end_tag
126
+ self.box_start_tag = box_start_tag
127
+ self.box_end_tag = box_end_tag
128
+ self.quad_start_tag = quad_start_tag
129
+ self.quad_end_tag = quad_end_tag
130
+ self.IMAGE_ST = (
131
+ ref_start_tag, ref_end_tag,
132
+ box_start_tag, box_end_tag,
133
+ quad_start_tag, quad_end_tag,
134
+ image_start_tag, image_end_tag,
135
+ image_pad_tag
136
+ )
137
+
138
+ self.errors = errors # how to handle errors in decoding
139
+
140
+ self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]
141
+ self.special_tokens = {
142
+ token: index
143
+ for index, token in enumerate(
144
+ SPECIAL_TOKENS + self.IMAGE_ST, start=len(self.mergeable_ranks)
145
+ )
146
+ }
147
+ self.img_start_id = self.special_tokens[self.image_start_tag]
148
+ self.img_end_id = self.special_tokens[self.image_end_tag]
149
+ self.img_pad_id = self.special_tokens[self.image_pad_tag]
150
+ self.ref_start_id = self.special_tokens[self.ref_start_tag]
151
+ self.ref_end_id = self.special_tokens[self.ref_end_tag]
152
+ self.box_start_id = self.special_tokens[self.box_start_tag]
153
+ self.box_end_id = self.special_tokens[self.box_end_tag]
154
+ self.quad_start_id = self.special_tokens[self.quad_start_tag]
155
+ self.quad_end_id = self.special_tokens[self.quad_end_tag]
156
+ self.image_special_tokens = set([
157
+ self.ref_start_id, self.ref_end_id, self.box_start_id, self.box_end_id,
158
+ self.quad_start_id, self.quad_end_id,
159
+ ])
160
+
161
+ enc = tiktoken.Encoding(
162
+ "Qwen",
163
+ pat_str=PAT_STR,
164
+ mergeable_ranks=self.mergeable_ranks,
165
+ special_tokens=self.special_tokens,
166
+ )
167
+ assert (
168
+ len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
169
+ ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
170
+
171
+ self.decoder = {
172
+ v: k for k, v in self.mergeable_ranks.items()
173
+ } # type: dict[int, bytes|str]
174
+ self.decoder.update({v: k for k, v in self.special_tokens.items()})
175
+
176
+ self.tokenizer = enc # type: tiktoken.Encoding
177
+
178
+ self.eod_id = self.tokenizer.eot_token
179
+ self.im_start_id = self.special_tokens[IMSTART]
180
+ self.im_end_id = self.special_tokens[IMEND]
181
+
182
+ def __getstate__(self):
183
+ # support pickling: the tiktoken Encoding object is not picklable, so drop it here and rebuild it in __setstate__
184
+ state = self.__dict__.copy()
185
+ del state['tokenizer']
186
+ return state
187
+
188
+ def __setstate__(self, state):
189
+ # tokenizer is not python native; don't pass it; rebuild it
190
+ self.__dict__.update(state)
191
+ enc = tiktoken.Encoding(
192
+ "Qwen",
193
+ pat_str=PAT_STR,
194
+ mergeable_ranks=self.mergeable_ranks,
195
+ special_tokens=self.special_tokens,
196
+ )
197
+ self.tokenizer = enc
198
+
199
+
200
+ def __len__(self) -> int:
201
+ return self.tokenizer.n_vocab
202
+
203
+ def get_vocab(self) -> Dict[bytes, int]:
204
+ return self.mergeable_ranks
205
+
206
+ def convert_tokens_to_ids(
207
+ self, tokens: Union[bytes, str, List[Union[bytes, str]]]
208
+ ) -> List[int]:
209
+ ids = []
210
+ if isinstance(tokens, (str, bytes)):
211
+ if tokens in self.special_tokens:
212
+ return self.special_tokens[tokens]
213
+ else:
214
+ return self.mergeable_ranks.get(tokens)
215
+ for token in tokens:
216
+ if token in self.special_tokens:
217
+ ids.append(self.special_tokens[token])
218
+ else:
219
+ ids.append(self.mergeable_ranks.get(token))
220
+ return ids
221
+
222
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
223
+ if not special_tokens and new_tokens:
224
+ raise ValueError('Adding regular tokens is not supported')
225
+ for token in new_tokens:
226
+ surface_form = token.content if isinstance(token, AddedToken) else token
227
+ if surface_form not in SPECIAL_TOKENS + self.IMAGE_ST:
228
+ raise ValueError('Adding unknown special tokens is not supported')
229
+ return 0
230
+
231
+ def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
232
+ """
233
+ Save only the vocabulary of the tokenizer (vocabulary).
234
+
235
+ Returns:
236
+ `Tuple(str)`: Paths to the files saved.
237
+ """
238
+ file_path = os.path.join(save_directory, "qwen.tiktoken")
239
+ with open(file_path, "w", encoding="utf8") as w:
240
+ for k, v in self.mergeable_ranks.items():
241
+ line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
242
+ w.write(line)
243
+ return (file_path,)
244
+
245
+ def tokenize(
246
+ self,
247
+ text: str,
248
+ allowed_special: Union[Set, str] = "all",
249
+ disallowed_special: Union[Collection, str] = (),
250
+ **kwargs,
251
+ ) -> List[Union[bytes, str]]:
252
+ """
253
+ Converts a string into a sequence of tokens.
254
+
255
+ Args:
256
+ text (`str`):
257
+ The sequence to be encoded.
258
+ allowed_special (`Literal["all"]` or `set`):
259
+ The surface forms of the tokens to be encoded as special tokens in regular texts.
260
+ Defaults to "all".
261
+ disallowed_special (`Literal["all"]` or `Collection`):
262
+ The surface forms of the tokens that should not be in regular texts and trigger errors.
263
+ Defaults to an empty tuple.
264
+
265
+ kwargs (additional keyword arguments, *optional*):
266
+ Will be passed to the underlying model specific encode method.
267
+
268
+ Returns:
269
+ `List[bytes|str]`: The list of tokens.
270
+ """
271
+ tokens = []
272
+ text = unicodedata.normalize("NFC", text)
273
+
274
+ # this implementation takes a detour: text -> token id -> token surface forms
275
+ for t in self.tokenizer.encode(
276
+ text, allowed_special=allowed_special, disallowed_special=disallowed_special
277
+ ):
278
+ tokens.append(self.decoder[t])
279
+
280
+ def _encode_imgurl(img_tokens):
281
+ assert img_tokens[0] == self.image_start_tag and img_tokens[-1] == self.image_end_tag
282
+ img_tokens = img_tokens[1:-1]
283
+ img_url = b''.join(img_tokens)
284
+ out_img_tokens = list(map(self.decoder.get, img_url))
285
+ if len(out_img_tokens) > IMG_TOKEN_SPAN:
286
+ raise ValueError("The content in {}..{} is too long".format(
287
+ self.image_start_tag, self.image_end_tag))
288
+ out_img_tokens.extend([self.image_pad_tag] * (IMG_TOKEN_SPAN - len(out_img_tokens)))
289
+ out_img_tokens = [self.image_start_tag] + out_img_tokens + [self.image_end_tag]
290
+ return out_img_tokens
291
+
292
+ return _replace_closed_tag(tokens, self.image_start_tag, self.image_end_tag, _encode_imgurl)
293
+
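+ # Sketch of the result: tokenizing "<img>path.jpg</img>" yields the
+ # byte-level tokens of the path, padded with <imgpad> up to IMG_TOKEN_SPAN
+ # (256) slots and re-bracketed by <img>...</img>, so every image occupies
+ # a fixed 258-token window.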
294
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
295
+ """
296
+ Converts a sequence of tokens into a single string.
297
+ """
298
+ text = ""
299
+ temp = b""
300
+ for t in tokens:
301
+ if isinstance(t, str):
302
+ if temp:
303
+ text += temp.decode("utf-8", errors=self.errors)
304
+ temp = b""
305
+ text += t
306
+ elif isinstance(t, bytes):
307
+ temp += t
308
+ else:
309
+ raise TypeError("token should only be of type bytes or str")
310
+ if temp:
311
+ text += temp.decode("utf-8", errors=self.errors)
312
+ return text
313
+
314
+ @property
315
+ def vocab_size(self):
316
+ return self.tokenizer.n_vocab
317
+
318
+ def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
319
+ """Converts an id to a token, special tokens included"""
320
+ if index in self.decoder:
321
+ return self.decoder[index]
322
+ raise ValueError("unknown ids")
323
+
324
+ def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
325
+ """Converts a token to an id using the vocab, special tokens included"""
326
+ if token in self.special_tokens:
327
+ return self.special_tokens[token]
328
+ if token in self.mergeable_ranks:
329
+ return self.mergeable_ranks[token]
330
+ raise ValueError("unknown token")
331
+
332
+ def _tokenize(self, text: str, **kwargs):
333
+ """
334
+ Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
335
+ vocabularies or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
336
+
337
+ Do NOT take care of added tokens.
338
+ """
339
+ raise NotImplementedError
340
+
341
+ def _decode(
342
+ self,
343
+ token_ids: Union[int, List[int]],
344
+ skip_special_tokens: bool = False,
345
+ errors: str = None,
346
+ **kwargs,
347
+ ) -> str:
348
+ if isinstance(token_ids, int):
349
+ token_ids = [token_ids]
350
+
351
+ def _decode_imgurl(img_token_ids):
352
+ assert img_token_ids[0] == self.img_start_id and img_token_ids[-1] == self.img_end_id
353
+ img_token_ids = img_token_ids[1:-1]
354
+ img_token_ids = img_token_ids[ : img_token_ids.index(self.img_pad_id)]
355
+ img_url = bytes(img_token_ids).decode('utf-8')
356
+ return [self.img_start_id] + self.tokenizer.encode(img_url) + [self.img_end_id]
357
+
358
+ token_ids = _replace_closed_tag(token_ids, self.img_start_id, self.img_end_id, _decode_imgurl)
359
+
360
+ if skip_special_tokens:
361
+ if kwargs.get('keep_image_special', False):
362
+ token_ids = [i for i in token_ids if i < self.eod_id
363
+ or i in self.image_special_tokens]
364
+ else:
365
+ token_ids = [i for i in token_ids if i < self.eod_id]
366
+ return self.tokenizer.decode(token_ids, errors=errors or self.errors)
367
+
368
+ def to_list_format(self, text: str):
369
+ text = unicodedata.normalize("NFC", text)
370
+ token_ids = self.tokenizer.encode(
371
+ text, allowed_special=set(self.IMAGE_ST + (ENDOFTEXT,)))
372
+
373
+ def _encode_vl_info(tokens):
374
+ if len(tokens) == 0:
375
+ return []
376
+ if tokens[0] == self.img_start_id and tokens[-1] == self.img_end_id:
377
+ key = 'image'
378
+ elif tokens[0] == self.ref_start_id and tokens[-1] == self.ref_end_id:
379
+ key = 'ref'
380
+ elif tokens[0] == self.box_start_id and tokens[-1] == self.box_end_id:
381
+ key = 'box'
382
+ elif tokens[0] == self.quad_start_id and tokens[-1] == self.quad_end_id:
383
+ key = 'quad'
384
+ else:
385
+ _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
386
+ return [{'text': b''.join(map(_tobytes, map(self.decoder.get, tokens))).decode('utf-8')}]
387
+ _tobytes = lambda x: x.encode('utf-8') if isinstance(x, str) else x
388
+ val = b''.join(map(_tobytes, map(self.decoder.get, tokens[1:-1]))).decode('utf-8')
389
+ return [{key: val}]
390
+
391
+ return _replace_closed_tag(
392
+ token_ids,
393
+ (self.img_start_id, self.ref_start_id, self.box_start_id, self.quad_start_id),
394
+ (self.img_end_id, self.ref_end_id, self.box_end_id, self.quad_end_id),
395
+ _encode_vl_info,
396
+ _encode_vl_info,
397
+ )
398
+
399
+ def from_list_format(self, list_format: List[Dict]):
400
+ text = ''
401
+ num_images = 0
402
+ for ele in list_format:
403
+ if 'image' in ele:
404
+ num_images += 1
405
+ text += f'Picture {num_images}: '
406
+ text += self.image_start_tag + ele['image'] + self.image_end_tag
407
+ text += '\n'
408
+ elif 'text' in ele:
409
+ text += ele['text']
410
+ elif 'box' in ele:
411
+ if 'ref' in ele:
412
+ text += self.ref_start_tag + ele['ref'] + self.ref_end_tag
413
+ for box in ele['box']:
414
+ text += self.box_start_tag + '(%d,%d),(%d,%d)' % (box[0], box[1], box[2], box[3]) + self.box_end_tag
415
+ else:
416
+ raise ValueError("Unsupported element: " + str(ele))
417
+ return text
418
+
419
+ def _fetch_latest_picture(self, response, history):
420
+ if history is None:
421
+ history = []
422
+ _history = history + [(response, None)]
423
+ for q, r in _history[::-1]:
424
+ for ele in self.to_list_format(q)[::-1]:
425
+ if 'image' in ele:
426
+ return ele['image']
427
+ return None
428
+
429
+ def _fetch_all_box_with_ref(self, text):
430
+ list_format = self.to_list_format(text)
431
+ output = []
432
+ for i, ele in enumerate(list_format):
433
+ if 'box' in ele:
434
+ bbox = tuple(map(int, ele['box'].replace('(', '').replace(')', '').split(',')))
435
+ assert len(bbox) == 4
436
+ output.append({'box': bbox})
437
+ if i > 0 and 'ref' in list_format[i-1]:
438
+ output[-1]['ref'] = list_format[i-1]['ref'].strip()
439
+ return output
440
+
441
+ def draw_bbox_on_latest_picture(
442
+ self,
443
+ response,
444
+ history=None,
445
+ ) -> Optional[Image.Image]:
446
+ image = self._fetch_latest_picture(response, history)
447
+ if image is None:
448
+ return None
449
+ if image.startswith("http://") or image.startswith("https://"):
450
+ image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
451
+ h, w = image.height, image.width
452
+ else:
453
+ image = np.asarray(Image.open(image).convert("RGB"))
454
+ h, w = image.shape[0], image.shape[1]
455
+ visualizer = Visualizer(image)
456
+
457
+ boxes = self._fetch_all_box_with_ref(response)
458
+ if not boxes:
459
+ return None
460
+ color = random.choice(list(mcolors.TABLEAU_COLORS))  # initial color
461
+ for box in boxes:
462
+ if 'ref' in box:  # pick a new random color for each new referring expression
463
+ color = random.choice(list(mcolors.TABLEAU_COLORS))
464
+ x1, y1, x2, y2 = box['box']
465
+ x1, y1, x2, y2 = (int(x1 / 1000 * w), int(y1 / 1000 * h), int(x2 / 1000 * w), int(y2 / 1000 * h))
466
+ visualizer.draw_box((x1, y1, x2, y2), alpha=1, edge_color=color)
467
+ if 'ref' in box:
468
+ visualizer.draw_text(box['ref'], (x1, y1), color=color, horizontal_alignment="left")
469
+ return visualizer.output
470
+
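+ # Coordinate convention assumed above: boxes arrive as integers on a
+ # 0-1000 grid and are rescaled to pixels, e.g. (250,250),(750,750) on an
+ # 800x600 image maps to (200,150)-(600,450).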
471
+
472
+ import colorsys
473
+ import logging
474
+ import math
475
+ import numpy as np
476
+ import matplotlib as mpl
477
+ import matplotlib.colors as mplc
478
+ import matplotlib.figure as mplfigure
479
+ import torch
480
+ from matplotlib.backends.backend_agg import FigureCanvasAgg
481
+ from PIL import Image
482
+ import random
483
+
484
+ logger = logging.getLogger(__name__)
485
+
486
+
487
+ class VisImage:
488
+ def __init__(self, img, scale=1.0):
489
+ self.img = img
490
+ self.scale = scale
491
+ self.width, self.height = img.shape[1], img.shape[0]
492
+ self._setup_figure(img)
493
+
494
+ def _setup_figure(self, img):
495
+ fig = mplfigure.Figure(frameon=False)
496
+ self.dpi = fig.get_dpi()
497
+ # add a small 1e-2 to avoid precision lost due to matplotlib's truncation
498
+ # (https://github.com/matplotlib/matplotlib/issues/15363)
499
+ fig.set_size_inches(
500
+ (self.width * self.scale + 1e-2) / self.dpi,
501
+ (self.height * self.scale + 1e-2) / self.dpi,
502
+ )
503
+ self.canvas = FigureCanvasAgg(fig)
504
+ # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
505
+ ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
506
+ ax.axis("off")
507
+ self.fig = fig
508
+ self.ax = ax
509
+ self.reset_image(img)
510
+
511
+ def reset_image(self, img):
512
+ img = img.astype("uint8")
513
+ self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
514
+
515
+ def save(self, filepath):
516
+ self.fig.savefig(filepath)
517
+
518
+ def get_image(self):
519
+ canvas = self.canvas
520
+ s, (width, height) = canvas.print_to_buffer()
521
+
522
+ buffer = np.frombuffer(s, dtype="uint8")
523
+
524
+ img_rgba = buffer.reshape(height, width, 4)
525
+ rgb, alpha = np.split(img_rgba, [3], axis=2)
526
+ return rgb.astype("uint8")
527
+
528
+
529
+ class Visualizer:
530
+ def __init__(self, img_rgb, metadata=None, scale=1.0):
531
+ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
532
+ self.font_path = FONT_PATH
533
+ self.output = VisImage(self.img, scale=scale)
534
+ self.cpu_device = torch.device("cpu")
535
+
536
+ # too small texts are useless, therefore clamp to 14
537
+ self._default_font_size = max(
538
+ np.sqrt(self.output.height * self.output.width) // 30, 15 // scale
539
+ )
540
+
541
+ def draw_text(
542
+ self,
543
+ text,
544
+ position,
545
+ *,
546
+ font_size=None,
547
+ color="g",
548
+ horizontal_alignment="center",
549
+ rotation=0,
550
+ ):
551
+ if not font_size:
552
+ font_size = self._default_font_size
553
+
554
+ # since the text background is dark, we don't want the text to be dark
555
+ color = np.maximum(list(mplc.to_rgb(color)), 0.2)
556
+ color[np.argmax(color)] = max(0.8, np.max(color))
557
+
558
+ x, y = position
559
+ self.output.ax.text(
560
+ x,
561
+ y,
562
+ text,
563
+ size=font_size * self.output.scale,
564
+ fontproperties=FontProperties(fname=self.font_path),
565
+ bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
566
+ verticalalignment="top",
567
+ horizontalalignment=horizontal_alignment,
568
+ color=color,
569
+ zorder=10,
570
+ rotation=rotation,
571
+ )
572
+ return self.output
573
+
574
+ def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
575
+
576
+ x0, y0, x1, y1 = box_coord
577
+ width = x1 - x0
578
+ height = y1 - y0
579
+
580
+ linewidth = max(self._default_font_size / 4, 1)
581
+
582
+ self.output.ax.add_patch(
583
+ mpl.patches.Rectangle(
584
+ (x0, y0),
585
+ width,
586
+ height,
587
+ fill=False,
588
+ edgecolor=edge_color,
589
+ linewidth=linewidth * self.output.scale,
590
+ alpha=alpha,
591
+ linestyle=line_style,
592
+ )
593
+ )
594
+ return self.output
595
+
596
+ def get_output(self):
597
+
598
+ return self.output
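
Taken together, the methods above define a grounded-prompting convention: `from_list_format` builds the tagged query string (`Picture N: <img>...</img>`), `to_list_format` parses special-token spans back into `image`/`ref`/`box` dicts, and `draw_bbox_on_latest_picture` rescales `<box>` coordinates from the tokenizer's 0-1000 grid to pixels. Below is a minimal usage sketch, not part of this commit: it assumes the shards have been downloaded, that you run from the repo root, and that `demo.jpg` exists locally; the response string is made up for illustration.

```python
from transformers import AutoTokenizer

# Hypothetical local path matching this repo's layout; trust_remote_code is
# required so that tokenization_qwen.QWenTokenizer (above) gets loaded.
tokenizer = AutoTokenizer.from_pretrained(
    "segagent/zzzmmz/SegAgent-Model", trust_remote_code=True
)

# from_list_format() turns a list of dicts into the tagged prompt string.
query = tokenizer.from_list_format([
    {"image": "demo.jpg"},  # hypothetical local image
    {"text": "Where is the dog?"},
])

# Given a (made-up) grounded response, draw_bbox_on_latest_picture() finds the
# most recent image in the history, rescales the 0-1000 box to pixels, and
# returns a VisImage whose .save() writes the annotated picture.
response = "<ref>the dog</ref><box>(120,340),(540,890)</box>"
vis = tokenizer.draw_bbox_on_latest_picture(response, history=[(query, response)])
if vis is not None:
    vis.save("demo_annotated.jpg")
```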
segagent/zzzmmz/SegAgent-Model/tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "added_tokens_decoder": {},
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_qwen.QWenTokenizer",
+       null
+     ]
+   },
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "tokenizer_class": "QWenTokenizer"
+ }
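
This config is what wires `AutoTokenizer` to the custom class via `auto_map`. A quick hedged check (assuming transformers is installed and the repo files are local):

```python
from transformers import AutoTokenizer

# auto_map resolves AutoTokenizer -> tokenization_qwen.QWenTokenizer.
tok = AutoTokenizer.from_pretrained(
    "segagent/zzzmmz/SegAgent-Model", trust_remote_code=True
)
print(type(tok).__name__)    # expected: QWenTokenizer
print(tok.model_max_length)  # 2048, from this config
print(tok.pad_token)         # <|endoftext|>, padded on the right
```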
segagent/zzzmmz/SegAgent-Model/trainer_state.json ADDED
The diff for this file is too large to render.
 
segagent/zzzmmz/SegAgent-Model/visual.py ADDED
@@ -0,0 +1,472 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from collections import OrderedDict
+ import math
+ import requests
+ from io import BytesIO
+ from functools import partial
+ from PIL import Image, ImageOps
+ from typing import Callable, Optional, Sequence, Tuple, List
+ import numpy as np
+
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+ from torch.nn.init import normal_
+ from torchvision import transforms
+ from torchvision.transforms import InterpolationMode
+ from pycocotools import mask as maskUtils
+ import os
+ import torchshow
+
+
+ def overlay_mask(image, mask, color=(0, 255, 0), alpha=0.5):
+     overlay = image.copy()
+     binary_mask = mask
+     color_mask = np.zeros_like(image)
+     color_mask[binary_mask > 0] = color
+     for c in range(0, 3):
+         overlay[:, :, c] = np.where(binary_mask > 0,
+                                     overlay[:, :, c] * (1 - alpha) + color_mask[:, :, c] * alpha,
+                                     overlay[:, :, c])
+     return overlay
+
+
+ def get_abs_pos(abs_pos, tgt_size):
+     # abs_pos: L, C
+     # tgt_size: M
+     # return: M, C
+     src_size = int(math.sqrt(abs_pos.size(0)))
+     tgt_size = int(math.sqrt(tgt_size))
+     dtype = abs_pos.dtype
+
+     if src_size != tgt_size:
+         return F.interpolate(
+             abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
+             size=(tgt_size, tgt_size),
+             mode="bicubic",
+             align_corners=False,
+         ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
+     else:
+         return abs_pos
+
+
+ # https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+     """
+     grid_size: int of the grid height and width
+     return:
+     pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+     """
+     grid_h = np.arange(grid_size, dtype=np.float32)
+     grid_w = np.arange(grid_size, dtype=np.float32)
+     grid = np.meshgrid(grid_w, grid_h)  # here w goes first
+     grid = np.stack(grid, axis=0)
+
+     grid = grid.reshape([2, 1, grid_size, grid_size])
+     pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+     if cls_token:
+         pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+     return pos_embed
+
+
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+     assert embed_dim % 2 == 0
+
+     # use half of dimensions to encode grid_h
+     emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+     emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+     emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+     return emb
+
+
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+     """
+     embed_dim: output dimension for each position
+     pos: a list of positions to be encoded: size (M,)
+     out: (M, D)
+     """
+     assert embed_dim % 2 == 0
+     omega = np.arange(embed_dim // 2, dtype=np.float32)
+     omega /= embed_dim / 2.
+     omega = 1. / 10000 ** omega  # (D/2,)
+
+     pos = pos.reshape(-1)  # (M,)
+     out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
+
+     emb_sin = np.sin(out)  # (M, D/2)
+     emb_cos = np.cos(out)  # (M, D/2)
+
+     emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+     return emb
+
+
+ class Resampler(nn.Module):
+     """
+     A 2D perceiver-resampler network with one cross-attention layer driven by
+     (grid_size**2) learnable queries and a 2d sincos pos_emb.
+     Outputs:
+         A tensor with the shape of (grid_size**2, embed_dim)
+     """
+     def __init__(
+         self,
+         grid_size,
+         embed_dim,
+         num_heads,
+         kv_dim=None,
+         norm_layer=nn.LayerNorm
+     ):
+         super().__init__()
+         self.num_queries = grid_size ** 2
+         self.embed_dim = embed_dim
+         self.num_heads = num_heads
+
+         self.pos_embed = nn.Parameter(
+             torch.from_numpy(get_2d_sincos_pos_embed(embed_dim, grid_size)).float()
+         ).requires_grad_(False)
+
+         self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
+         normal_(self.query, std=.02)
+
+         if kv_dim is not None and kv_dim != embed_dim:
+             self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
+         else:
+             self.kv_proj = nn.Identity()
+
+         self.attn = nn.MultiheadAttention(embed_dim, num_heads)
+         self.ln_q = norm_layer(embed_dim)
+         self.ln_kv = norm_layer(embed_dim)
+
+         # self.apply(self._init_weights)
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+         elif isinstance(m, nn.LayerNorm):
+             nn.init.constant_(m.bias, 0)
+             nn.init.constant_(m.weight, 1.0)
+
+     def forward(self, x, attn_mask=None):
+         pos_embed = get_abs_pos(self.pos_embed, x.size(1))
+
+         x = self.kv_proj(x)
+         x = self.ln_kv(x).permute(1, 0, 2)
+
+         N = x.shape[1]
+         q = self.ln_q(self.query)
+         out = self.attn(
+             self._repeat(q, N) + self.pos_embed.unsqueeze(1),
+             x + pos_embed.unsqueeze(1),
+             x,
+             attn_mask=attn_mask)[0]
+         return out.permute(1, 0, 2)
+
+     def _repeat(self, query, N: int):
+         return query.unsqueeze(1).repeat(1, N, 1)
+
+
+ class VisualAttention(nn.Module):
+     """self-attention layer class.
+
+     Self-attention layer takes input with size [s, b, h]
+     and returns output of the same size.
+     """
+
+     def __init__(self, embed_dim, num_heads,
+                  bias=True, kdim=None, vdim=None):
+         super(VisualAttention, self).__init__()
+         self.embed_dim = embed_dim
+         self.kdim = kdim if kdim is not None else embed_dim
+         self.vdim = vdim if vdim is not None else embed_dim
+         self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
+
+         self.num_heads = num_heads
+
+         # Per attention head and per partition values.
+         assert embed_dim % num_heads == 0
+         self.hidden_size_per_attention_head = embed_dim // num_heads
+         self.num_attention_heads_per_partition = num_heads
+         self.hidden_size_per_partition = embed_dim
+
+         # Strided linear layer.
+         assert self._qkv_same_embed_dim, 'Only Support SelfAttention Currently'
+         self.in_proj = nn.Linear(embed_dim, 3 * embed_dim)
+         self.out_proj = nn.Linear(embed_dim, embed_dim)
+         self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
+
+     def forward(self, query, key, value, attn_mask=None):
+         # query/key/value: [sq, b, h]
+         sq, b, _ = query.size()
+
+         assert torch.allclose(query, key), 'Only Support Self-Attention Currently'
+         sk = sq
+         mixed_x_layer = self.in_proj(query)
+
+         # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
+         new_tensor_shape = mixed_x_layer.size()[:-1] + \
+             (self.num_attention_heads_per_partition,
+              3 * self.hidden_size_per_attention_head)
+         mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
+
+         # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
+         query_layer, key_layer, value_layer = mixed_x_layer.split(
+             self.hidden_size_per_attention_head, dim=-1)
+
+         # [sq, b, np, hn] -> [sq, b * np, hn]
+         query_layer = query_layer.view(sq,
+                                        b * self.num_attention_heads_per_partition,
+                                        self.hidden_size_per_attention_head).transpose(0, 1)
+         # [sk, b, np, hn] -> [sk, b * np, hn]
+         key_layer = key_layer.view(sk,
+                                    b * self.num_attention_heads_per_partition,
+                                    self.hidden_size_per_attention_head).transpose(0, 1)
+
+         q_scaled = query_layer / self.norm_factor
+         if attn_mask is not None:
+             attention_probs = torch.baddbmm(attn_mask, q_scaled, key_layer.transpose(-2, -1))
+         else:
+             attention_probs = torch.bmm(q_scaled, key_layer.transpose(-2, -1))
+         attention_probs = attention_probs.softmax(dim=-1)
+
+         value_layer = value_layer.view(sk,
+                                        b * self.num_attention_heads_per_partition,
+                                        self.hidden_size_per_attention_head).transpose(0, 1)
+
+         # matmul: [b * np, sq, hn]
+         context_layer = torch.bmm(attention_probs, value_layer)
+
+         # change view [b, np, sq, hn]
+         context_layer = context_layer.view(b,
+                                            self.num_attention_heads_per_partition,
+                                            sq, self.hidden_size_per_attention_head)
+
+         # [b, np, sq, hn] --> [sq, b, np, hn]
+         context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
+
+         # [sq, b, np, hn] --> [sq, b, hp]
+         new_context_layer_shape = context_layer.size()[:-2] + \
+             (self.hidden_size_per_partition,)
+         context_layer = context_layer.view(*new_context_layer_shape)
+
+         output = self.out_proj(context_layer)
+
+         return output
+
+
+ class VisualAttentionBlock(nn.Module):
+     def __init__(
+         self,
+         d_model: int,
+         n_head: int,
+         mlp_ratio: float = 4.0,
+         act_layer: Callable = nn.GELU,
+         norm_layer: Callable = nn.LayerNorm,
+         is_cross_attention: bool = False,
+     ):
+         super().__init__()
+
+         self.ln_1 = norm_layer(d_model)
+         if is_cross_attention:
+             self.ln_1_kv = norm_layer(d_model)
+
+         self.ln_2 = norm_layer(d_model)
+         mlp_width = int(d_model * mlp_ratio)
+         self.attn = VisualAttention(d_model, n_head)
+         self.mlp = nn.Sequential(OrderedDict([
+             ("c_fc", nn.Linear(d_model, mlp_width)),
+             ("gelu", act_layer()),
+             ("c_proj", nn.Linear(mlp_width, d_model))
+         ]))
+
+     def attention(
+         self,
+         q_x: torch.Tensor,
+         k_x: Optional[torch.Tensor] = None,
+         v_x: Optional[torch.Tensor] = None,
+         attn_mask: Optional[torch.Tensor] = None,
+     ):
+         k_x = k_x if k_x is not None else q_x
+         v_x = v_x if v_x is not None else q_x
+
+         attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
+         return self.attn(q_x, k_x, v_x, attn_mask=attn_mask)
+
+     def forward(
+         self,
+         q_x: torch.Tensor,
+         k_x: Optional[torch.Tensor] = None,
+         v_x: Optional[torch.Tensor] = None,
+         attn_mask: Optional[torch.Tensor] = None,
+     ):
+         k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
+         v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
+
+         x = q_x + self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask)
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     def __init__(
+         self,
+         width: int,
+         layers: int,
+         heads: int,
+         mlp_ratio: float = 4.0,
+         act_layer: Callable = nn.GELU,
+         norm_layer: Callable = nn.LayerNorm,
+     ):
+         super().__init__()
+         self.width = width
+         self.layers = layers
+
+         self.resblocks = nn.ModuleList([
+             VisualAttentionBlock(
+                 width, heads, mlp_ratio, act_layer=act_layer, norm_layer=norm_layer)
+             for _ in range(layers)
+         ])
+
+     def get_cast_dtype(self) -> torch.dtype:
+         return self.resblocks[0].mlp.c_fc.weight.dtype
+
+     def get_cast_device(self) -> torch.device:
+         return self.resblocks[0].mlp.c_fc.weight.device
+
+     def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+         for r in self.resblocks:
+             x = r(x, attn_mask=attn_mask)
+         return x
+
+
+ class VisionTransformer(nn.Module):
+
+     def __init__(
+         self,
+         image_size: int,
+         patch_size: int,
+         width: int,
+         layers: int,
+         heads: int,
+         mlp_ratio: float,
+         n_queries: int = 256,
+         output_dim: int = 512,
+         **kwargs
+     ):
+         super().__init__()
+         image_height, image_width = self.image_size = (image_size, image_size)
+         patch_height, patch_width = self.patch_size = (patch_size, patch_size)
+         self.grid_size = (image_height // patch_height, image_width // patch_width)
+         self.output_dim = output_dim
+
+         mean = (0.48145466, 0.4578275, 0.40821073)
+         std = (0.26862954, 0.26130258, 0.27577711)
+         self.image_transform = transforms.Compose([
+             transforms.Resize(
+                 (image_size, image_size),
+                 interpolation=InterpolationMode.BICUBIC
+             ),
+             transforms.ToTensor(),
+             transforms.Normalize(mean=mean, std=std),
+         ])
+
+         self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+         # class embeddings and positional embeddings
+         scale = width ** -0.5
+         self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))
+
+         norm_layer = partial(nn.LayerNorm, eps=1e-6)
+         act_layer = nn.GELU
+
+         self.ln_pre = norm_layer(width)
+         self.transformer = TransformerBlock(
+             width,
+             layers,
+             heads,
+             mlp_ratio,
+             act_layer=act_layer,
+             norm_layer=norm_layer,
+         )
+
+         self.attn_pool = Resampler(
+             grid_size=int(math.sqrt(n_queries)),
+             embed_dim=output_dim,
+             num_heads=output_dim // 128,
+             kv_dim=width,
+             norm_layer=norm_layer,
+         )
+         self.ln_post = norm_layer(output_dim)
+         self.proj = nn.Parameter((output_dim ** -0.5) * torch.randn(output_dim, output_dim))
+         self.data_path = os.environ.get("DATA", "./")
+         self.visualize = os.environ.get("VISUALIZE", False)
+
+     def forward(self, x: torch.Tensor):
+         x = x.to(
+             dtype=self.transformer.get_cast_dtype(),
+             device=self.transformer.get_cast_device(),
+         )
+         # to patches
+         x = self.conv1(x)  # shape = [*, width, grid, grid]
+         x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
+         x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
+
+         x = x + get_abs_pos(self.positional_embedding, x.size(1))
+
+         x = self.ln_pre(x)
+
+         x = x.permute(1, 0, 2)  # NLD -> LND
+         x = self.transformer(x)
+         x = x.permute(1, 0, 2)  # LND -> NLD
+
+         x = self.attn_pool(x)
+         x = self.ln_post(x)
+         x = x @ self.proj
+
+         return x
+
+     def encode(self, image_paths: List[str], masks: List[str]):
+         images = []
+         for image_path, mask in zip(image_paths, masks):
+             if image_path.startswith("http://") or image_path.startswith("https://"):
+                 image = Image.open(requests.get(image_path, stream=True).raw)
+             else:
+                 image = Image.open(image_path)
+             image = ImageOps.exif_transpose(image)
+             image = image.convert("RGB")
+             if len(mask) > 0:
+                 print(len(mask))
+                 seg = {'counts': mask, 'size': [image.size[1], image.size[0]]}
+             else:
+                 seg = None
+             if seg is not None:
+                 mask = self.annToMask(seg, image.size[1], image.size[0])
+                 image = overlay_mask(np.array(image), mask)
+                 image = Image.fromarray(image)
+             visualize = self.visualize
+             if visualize:
+                 self.data_path = "visualize"
+                 os.makedirs(self.data_path, exist_ok=True)
+                 torchshow.save(image, os.path.join(self.data_path, image_path.split("/")[-1]))
+             images.append(self.image_transform(image))
+         images = torch.stack(images, dim=0)
+         # torchshow.save(images)
+         return self(images)
+
+     def annToMask(self, mask_ann, h, w):
+         if mask_ann is None:
+             return np.zeros((h, w), dtype=np.uint8)
+
+         if isinstance(mask_ann, list):
+             rles = maskUtils.frPyObjects(mask_ann, h, w)
+             rle = maskUtils.merge(rles)
+         elif isinstance(mask_ann['counts'], list):
+             # uncompressed RLE
+             rle = maskUtils.frPyObjects(mask_ann, h, w)
+         else:
+             # rle
+             rle = mask_ann
+         mask = maskUtils.decode(rle)
+         return mask
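
Note that `encode()` takes one compressed-RLE string per image and burns the decoded mask into the pixels (via `annToMask` and `overlay_mask`) before the CLIP-style transform, so segmentation state reaches the ViT purely through the image. A self-contained sketch of that RLE-to-overlay path, using synthetic data and only numpy plus pycocotools:

```python
import numpy as np
from pycocotools import mask as maskUtils

# Build a synthetic 64x64 binary mask, RLE-encode it, then decode and
# alpha-blend it the same way annToMask()/overlay_mask() above do.
h, w = 64, 64
mask = np.zeros((h, w), dtype=np.uint8)
mask[16:48, 16:48] = 1

rle = maskUtils.encode(np.asfortranarray(mask))  # compressed RLE dict
decoded = maskUtils.decode(rle)                  # back to an HxW uint8 mask

image = np.full((h, w, 3), 255, dtype=np.uint8)  # dummy white image
alpha, color = 0.5, (0, 255, 0)
overlay = image.copy()
for c in range(3):  # same per-channel blend as overlay_mask()
    overlay[:, :, c] = np.where(
        decoded > 0,
        overlay[:, :, c] * (1 - alpha) + color[c] * alpha,
        overlay[:, :, c],
    )
```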
segagent/zzzmmz/SegAgent-Model/zero_to_fp32.py ADDED
@@ -0,0 +1,587 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+ from dataclasses import dataclass
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+ @dataclass
+ class zero_model_state:
+     buffers: dict()
+     param_shapes: dict()
+     shared_params: list
+     ds_version: int
+     frozen_param_shapes: dict()
+     frozen_param_fragments: dict()
+
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage <= 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+     if len(ckpt_files) == 0:
+         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+     return ckpt_files
+
+
+ def get_optim_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+ def get_model_state_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+ def parse_model_states(files):
+     zero_model_states = []
+     for file in files:
+         state_dict = torch.load(file, map_location=device)
+
+         if BUFFER_NAMES not in state_dict:
+             raise ValueError(f"{file} is not a model state checkpoint")
+         buffer_names = state_dict[BUFFER_NAMES]
+         if debug:
+             print("Found buffers:", buffer_names)
+
+         # recover just the buffers while restoring them to fp32 if they were saved in fp16
+         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+         param_shapes = state_dict[PARAM_SHAPES]
+
+         # collect parameters that are included in param_shapes
+         param_names = []
+         for s in param_shapes:
+             for name in s.keys():
+                 param_names.append(name)
+
+         # update with frozen parameters
+         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+         if frozen_param_shapes is not None:
+             if debug:
+                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+             param_names += list(frozen_param_shapes.keys())
+
+         # handle shared params
+         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+         ds_version = state_dict.get(DS_VERSION, None)
+
+         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+         z_model_state = zero_model_state(buffers=buffers,
+                                          param_shapes=param_shapes,
+                                          shared_params=shared_params,
+                                          ds_version=ds_version,
+                                          frozen_param_shapes=frozen_param_shapes,
+                                          frozen_param_fragments=frozen_param_fragments)
+         zero_model_states.append(z_model_state)
+
+     return zero_model_states
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dict = torch.load(f, map_location=device)
+         # immediately discard the two potentially huge optimizer states (e.g. Adam moments), since
+         # we only care about the fp32 master weights; this also handles the case where they were
+         # already removed by another helper script
+         state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
+         state_dicts.append(state_dict)
+
+     if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage <= 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage <= 2:
+         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_files = get_model_state_files(ds_checkpoint_dir)
+
+     zero_model_states = parse_model_states(model_files)
+     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+     if zero_stage <= 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+             0).narrow(0, 0, unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file, tag=args.tag)
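
For reference, a hedged sketch of using this script programmatically with a checkpoint directory such as `checkpoint-1000/` (hypothetical path; DeepSpeed must be installed, since the pickled checkpoints reference its data structures):

```python
# Hedged sketch, assuming this script sits next to a DeepSpeed checkpoint
# saved under ./checkpoint-1000 (hypothetical path).
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

# Reads the tag from checkpoint-1000/latest, reconstructs the fp32 weights
# from the sharded optimizer states, and writes one consolidated file.
convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-1000", "checkpoint-1000/pytorch_model.bin"
)
```

The CLI form is equivalent: `python zero_to_fp32.py checkpoint-1000 checkpoint-1000/pytorch_model.bin`; `load_state_dict_from_zero_checkpoint` covers the in-memory case with a live model object.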