Xenova (HF staff) committed
Commit a44cb90
Parent: 454b67d

Upload folder using huggingface_hub

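The commit message above refers to the Hub client library's folder-upload API. A minimal sketch of that workflow is shown below; the local path and target repo id are placeholders, not taken from this commit.

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./sam-vit-huge",        # local export: config.json, onnx/, quantize_config.json, ...
    repo_id="<user>/sam-vit-huge",       # hypothetical target repo id
    commit_message="Upload folder using huggingface_hub",
)
```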
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 onnx/model.onnx_data filter=lfs diff=lfs merge=lfs -text
+onnx/vision_encoder.onnx_data filter=lfs diff=lfs merge=lfs -text
config.json CHANGED
@@ -1,248 +1,32 @@
 {
-  "_commit_hash": "239af0aa4662d7fdac1fc5efe21f8f0cb0c35105",
   "_name_or_path": "facebook/sam-vit-huge",
   "architectures": [
     "SamModel"
   ],
   "initializer_range": 0.02,
   "mask_decoder_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_downsample_rate": 2,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "relu",
-    "hidden_size": 256,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "iou_head_depth": 3,
-    "iou_head_hidden_dim": 256,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-06,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
-    "mlp_dim": 2048,
-    "model_type": "",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 8,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 2,
-    "num_multimask_outputs": 3,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.29.2",
-    "typical_p": 1.0,
-    "use_bfloat16": false
+    "model_type": ""
   },
   "model_type": "sam",
   "prompt_encoder_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "gelu",
-    "hidden_size": 256,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_embedding_size": 64,
-    "image_size": 1024,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-06,
-    "length_penalty": 1.0,
-    "mask_input_channels": 16,
-    "max_length": 20,
-    "min_length": 0,
-    "model_type": "",
-    "no_repeat_ngram_size": 0,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_point_embeddings": 4,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 16,
-    "prefix": null,
-    "problem_type": null,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.29.2",
-    "typical_p": 1.0,
-    "use_bfloat16": false
+    "model_type": ""
   },
-  "transformers_version": null,
+  "transformers_version": "4.33.0.dev0",
   "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "begin_suppress_tokens": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
     "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
     "global_attn_indexes": [
       7,
       15,
       23,
       31
     ],
-    "hidden_act": "gelu",
     "hidden_size": 1280,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 1024,
     "initializer_factor": 1.0,
-    "initializer_range": 1e-10,
     "intermediate_size": 6144,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-06,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
     "mlp_dim": 5120,
-    "mlp_ratio": 4.0,
     "model_type": "",
-    "no_repeat_ngram_size": 0,
     "num_attention_heads": 16,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
     "num_hidden_layers": 32,
-    "num_pos_feats": 128,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_channels": 256,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 16,
-    "prefix": null,
-    "problem_type": null,
-    "projection_dim": 512,
-    "pruned_heads": {},
-    "qkv_bias": true,
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "suppress_tokens": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.29.2",
-    "typical_p": 1.0,
-    "use_abs_pos": true,
-    "use_bfloat16": false,
-    "use_rel_pos": true,
-    "window_size": 14
+    "projection_dim": 512
   }
 }
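The diff above strips every key that matches a `transformers` default, keeping only the overrides. A minimal sketch (assuming a local clone of this repo and a `transformers` version with SAM support, 4.29 or later) showing that the trimmed file still expands to a full configuration when loaded:

```python
from transformers import SamConfig

# Load the trimmed config.json from a local clone of this repo.
config = SamConfig.from_pretrained(".")

print(config.vision_config.hidden_size)          # 1280 (kept in the file)
print(config.vision_config.global_attn_indexes)  # [7, 15, 23, 31]
print(config.mask_decoder_config.hidden_size)    # 256 (filled back in from library defaults)
```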
onnx/prompt_encoder_mask_decoder.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90bf9ed56968e0e4070629ba4190a715be9b09fe6e7c6afcf7ed70703cded437
+size 16557844
onnx/prompt_encoder_mask_decoder_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e229d24889ff0550f67e95b8ce68dec42492a6c25eacb9e47b1c2e4d3f37ff94
+size 4903718
onnx/vision_encoder.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f29ddfa848550eb784ce3fb4164588a27328e28c5e697f975ab92a3be27cfc8e
+size 2001683
onnx/vision_encoder.onnx_data ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e452133f5d9dec76ba7d518a3690e5989e13f894ab07d561c38180a692ab353a
+size 2548105216
onnx/vision_encoder_quantized.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c84d6fce9338c8ca204224e4a2354150fb3be2b5d121a463207d2103a02c46bd
3
+ size 659577872
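The export splits SAM into two graphs: the heavy ViT-H image encoder (`vision_encoder.onnx`, with its weights in the external `vision_encoder.onnx_data` file) and a lightweight prompt-encoder/mask-decoder graph that can be re-run cheaply for each new point or box prompt. A minimal sketch of opening both with `onnxruntime`, assuming a local clone of the repo; the exact input and output names depend on the export and can be inspected as shown:

```python
import onnxruntime as ort

# Image encoder: run once per image. onnxruntime picks up the external
# weight file (vision_encoder.onnx_data) from the same directory automatically.
vision_encoder = ort.InferenceSession("onnx/vision_encoder.onnx")

# Prompt encoder + mask decoder: run once per prompt, reusing the image embeddings.
decoder = ort.InferenceSession("onnx/prompt_encoder_mask_decoder.onnx")

for session in (vision_encoder, decoder):
    print([(i.name, i.shape) for i in session.get_inputs()])
    print([(o.name, o.shape) for o in session.get_outputs()])
```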
quantize_config.json ADDED
@@ -0,0 +1,82 @@
+{
+  "per_channel": true,
+  "reduce_range": true,
+  "per_model_config": {
+    "prompt_encoder_mask_decoder": {
+      "op_types": [
+        "Reshape",
+        "Concat",
+        "OneHot",
+        "ReduceMean",
+        "Gather",
+        "Sin",
+        "Div",
+        "Add",
+        "Slice",
+        "ScatterND",
+        "ConstantOfShape",
+        "Where",
+        "Expand",
+        "Constant",
+        "Sqrt",
+        "Mul",
+        "Sub",
+        "Not",
+        "Cos",
+        "Range",
+        "MatMul",
+        "Equal",
+        "Tile",
+        "Erf",
+        "Shape",
+        "Softmax",
+        "Neg",
+        "Unsqueeze",
+        "Transpose",
+        "ConvTranspose",
+        "Pow",
+        "Relu",
+        "Cast"
+      ],
+      "weight_type": "QInt8"
+    },
+    "vision_encoder": {
+      "op_types": [
+        "Reshape",
+        "Concat",
+        "ReduceMean",
+        "Gather",
+        "Sin",
+        "Div",
+        "Add",
+        "Slice",
+        "Pad",
+        "Resize",
+        "Conv",
+        "ConstantOfShape",
+        "Identity",
+        "Expand",
+        "Constant",
+        "Sqrt",
+        "Mul",
+        "Squeeze",
+        "Sub",
+        "Cos",
+        "MatMul",
+        "Range",
+        "Tile",
+        "Erf",
+        "Shape",
+        "Softmax",
+        "Einsum",
+        "Unsqueeze",
+        "Split",
+        "Transpose",
+        "Mod",
+        "Pow",
+        "Cast"
+      ],
+      "weight_type": "QUInt8"
+    }
+  }
+}
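quantize_config.json records the settings used to produce the *_quantized.onnx files: per-channel, reduced-range dynamic quantization, with QInt8 weights for the prompt-encoder/mask-decoder and QUInt8 weights for the vision encoder. A minimal sketch of how those settings map onto onnxruntime's dynamic quantization API; the actual conversion script may differ, so treat this as an illustration rather than the exact command used:

```python
from onnxruntime.quantization import QuantType, quantize_dynamic

# Vision encoder -> QUInt8 weights, per-channel, reduced range.
quantize_dynamic(
    model_input="onnx/vision_encoder.onnx",
    model_output="onnx/vision_encoder_quantized.onnx",
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QUInt8,
)

# Prompt encoder + mask decoder -> QInt8 weights.
quantize_dynamic(
    model_input="onnx/prompt_encoder_mask_decoder.onnx",
    model_output="onnx/prompt_encoder_mask_decoder_quantized.onnx",
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QInt8,
)
```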