openvino-ci committed on
Commit
8d2a7a0
1 Parent(s): c72588b

Upload folder using huggingface_hub
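
Per the commit message, the folder was pushed with the huggingface_hub client. A minimal sketch of such an upload (the local folder path and the use of `HfApi.upload_folder` are assumptions; only the repo id comes from this page):

```
# Hypothetical upload sketch; the folder path is an assumption, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="gemma-2b-it-fp16-ov",        # assumed local export directory
    repo_id="OpenVINO/gemma-2b-it-fp16-ov",   # target model repository
    commit_message="Upload folder using huggingface_hub",
)
```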

README.md CHANGED
@@ -1,11 +1,11 @@
1
  ---
2
  license: gemma
3
  license_link: https://choosealicense.com/licenses/gemma/
4
- base_model:
5
- - google/gemma-2b-it
6
  ---
7
  # gemma-2b-it-fp16-ov
8
- * Model creator: [Google](https://huggingface.co/google)
9
  * Original model: [gemma-2b-it](https://huggingface.co/google/gemma-2b-it)
10
 
11
  ## Description
@@ -14,10 +14,10 @@ base_model:
14
 
15
  The provided OpenVINO™ IR model is compatible with:
16
 
17
- * OpenVINO version 2024.4.0 and higher
18
- * Optimum Intel 1.20.0 and higher
19
 
20
- ## Running Model Inference with [Optimum Intel](https://huggingface.co/docs/optimum/intel/index)
21
 
22
  1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend:
23
 
@@ -44,37 +44,6 @@ print(text)
44
 
45
  For more examples and possible optimizations, refer to the [OpenVINO Large Language Model Inference Guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html).
46
 
47
- ## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai)
48
-
49
- 1. Install packages required for using OpenVINO GenAI.
50
- ```
51
- pip install openvino-genai huggingface_hub
52
- ```
53
-
54
- 2. Download model from HuggingFace Hub
55
-
56
- ```
57
- import huggingface_hub as hf_hub
58
-
59
- model_id = "OpenVINO/gemma-2b-it-fp16-ov"
60
- model_path = "gemma-2b-it-fp16-ov"
61
-
62
- hf_hub.snapshot_download(model_id, local_dir=model_path)
63
-
64
- ```
65
-
66
- 3. Run model inference:
67
-
68
- ```
69
- import openvino_genai as ov_genai
70
-
71
- device = "CPU"
72
- pipe = ov_genai.LLMPipeline(model_path, device)
73
- print(pipe.generate("What is OpenVINO?", max_length=200))
74
- ```
75
-
76
- More GenAI usage examples can be found in OpenVINO GenAI library [docs](https://github.com/openvinotoolkit/openvino.genai/blob/master/src/README.md) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples)
77
-
78
  ## Limitations
79
 
80
  Check the [original model card](https://huggingface.co/google/gemma-2b-it) for limitations.
@@ -85,4 +54,4 @@ The original model is distributed under [gemma](https://choosealicense.com/licen
85
 
86
  ## Disclaimer
87
 
88
- Intel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.
 
1
  ---
2
  license: gemma
3
  license_link: https://choosealicense.com/licenses/gemma/
4
+ base_model: google/gemma-2b-it
5
+
6
  ---
7
  # gemma-2b-it-fp16-ov
8
+ * Model creator: [google](https://huggingface.co/google)
9
  * Original model: [gemma-2b-it](https://huggingface.co/google/gemma-2b-it)
10
 
11
  ## Description
 
14
 
15
  The provided OpenVINO™ IR model is compatible with:
16
 
17
+ * OpenVINO version 2024.5.0 and higher
18
+ * Optimum Intel 1.21.0 and higher
19
 
20
+ ## Running Model Inference
21
 
22
  1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend:
23
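
The install commands themselves are elided in this hunk; as a minimal sketch following the usual Optimum Intel setup (the package extra is an assumption, see the linked docs for exact version pins):

```
pip install optimum[openvino]
```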
 
 
44
 
45
  For more examples and possible optimizations, refer to the [OpenVINO Large Language Model Inference Guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html).
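
The download and generation code of this hunk is likewise elided above; a minimal inference sketch using the standard Optimum Intel API (the prompt string and token budget are arbitrary examples, not taken from the original README) looks like:

```
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer

model_id = "OpenVINO/gemma-2b-it-fp16-ov"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForCausalLM.from_pretrained(model_id)  # loads the OpenVINO IR weights

inputs = tokenizer("What is OpenVINO?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=100)
text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(text)
```

The sketch ends with `print(text)`, matching the context line referenced in the hunk header above.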
46
 
47
  ## Limitations
48
 
49
  Check the [original model card](https://huggingface.co/google/gemma-2b-it) for limitations.
 
54
 
55
  ## Disclaimer
56
 
57
+ Intel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.
config.json CHANGED
@@ -1,5 +1,6 @@
1
  {
2
- "_name_or_path": "google/gemma-2b-it",
 
3
  "architectures": [
4
  "GemmaForCausalLM"
5
  ],
@@ -23,7 +24,7 @@
23
  "rope_scaling": null,
24
  "rope_theta": 10000.0,
25
  "torch_dtype": "float16",
26
- "transformers_version": "4.45.2",
27
  "use_cache": true,
28
  "vocab_size": 256000
29
  }
 
1
  {
2
+ "_attn_implementation_autoset": true,
3
+ "_name_or_path": "OpenVINO/gemma-2b-it-fp16-ov",
4
  "architectures": [
5
  "GemmaForCausalLM"
6
  ],
 
24
  "rope_scaling": null,
25
  "rope_theta": 10000.0,
26
  "torch_dtype": "float16",
27
+ "transformers_version": "4.46.3",
28
  "use_cache": true,
29
  "vocab_size": 256000
30
  }
generation_config.json CHANGED
@@ -3,5 +3,5 @@
3
  "bos_token_id": 2,
4
  "eos_token_id": 1,
5
  "pad_token_id": 0,
6
- "transformers_version": "4.45.2"
7
  }
 
3
  "bos_token_id": 2,
4
  "eos_token_id": 1,
5
  "pad_token_id": 0,
6
+ "transformers_version": "4.46.3"
7
  }
openvino_detokenizer.xml CHANGED
@@ -1,16 +1,16 @@
1
  <?xml version="1.0"?>
2
  <net name="detokenizer" version="11">
3
  <layers>
4
- <layer id="0" name="Parameter_69827" type="Parameter" version="opset1">
5
  <data shape="?,?" element_type="i64" />
6
  <output>
7
- <port id="0" precision="I64" names="Parameter_69827">
8
  <dim>-1</dim>
9
  <dim>-1</dim>
10
  </port>
11
  </output>
12
  </layer>
13
- <layer id="1" name="Convert_69844" type="Convert" version="opset1">
14
  <data destination_type="i32" />
15
  <input>
16
  <port id="0" precision="I64">
@@ -25,7 +25,7 @@
25
  </port>
26
  </output>
27
  </layer>
28
- <layer id="2" name="Constant_69794" type="Const" version="opset1">
29
  <data element_type="u8" shape="2955910" offset="0" size="2955910" />
30
  <output>
31
  <port id="0" precision="U8">
@@ -33,7 +33,7 @@
33
  </port>
34
  </output>
35
  </layer>
36
- <layer id="3" name="StringTensorUnpack_69795" type="StringTensorUnpack" version="extension">
37
  <data mode="begins_ends" />
38
  <input>
39
  <port id="0" precision="U8">
@@ -52,7 +52,7 @@
52
  </port>
53
  </output>
54
  </layer>
55
- <layer id="4" name="VocabDecoder_69828" type="VocabDecoder" version="extension">
56
  <data skip_tokens="0, 1, 2, 3, 106, 107" />
57
  <input>
58
  <port id="0" precision="I32">
@@ -87,7 +87,7 @@
87
  </port>
88
  </output>
89
  </layer>
90
- <layer id="5" name="Constant_69830" type="Const" version="opset1">
91
  <data element_type="u8" shape="3" offset="2955910" size="3" />
92
  <output>
93
  <port id="0" precision="U8">
@@ -95,7 +95,7 @@
95
  </port>
96
  </output>
97
  </layer>
98
- <layer id="6" name="Constant_69832" type="Const" version="opset1">
99
  <data element_type="u8" shape="1" offset="2955913" size="1" />
100
  <output>
101
  <port id="0" precision="U8">
@@ -103,7 +103,7 @@
103
  </port>
104
  </output>
105
  </layer>
106
- <layer id="7" name="RegexNormalization_69833" type="RegexNormalization" version="extension">
107
  <data global_replace="true" />
108
  <input>
109
  <port id="0" precision="I32">
@@ -134,7 +134,7 @@
134
  </port>
135
  </output>
136
  </layer>
137
- <layer id="8" name="ByteFallback_69834" type="ByteFallback" version="extension">
138
  <input>
139
  <port id="0" precision="I32">
140
  <dim>-1</dim>
@@ -158,7 +158,7 @@
158
  </port>
159
  </output>
160
  </layer>
161
- <layer id="9" name="FuzeRagged_69835" type="FuzeRagged" version="extension">
162
  <input>
163
  <port id="0" precision="I32">
164
  <dim>-1</dim>
@@ -182,7 +182,7 @@
182
  </port>
183
  </output>
184
  </layer>
185
- <layer id="10" name="StringTensorPack_69836" type="StringTensorPack" version="extension">
186
  <data mode="begins_ends" />
187
  <input>
188
  <port id="0" precision="I32">
@@ -201,7 +201,7 @@
201
  </port>
202
  </output>
203
  </layer>
204
- <layer id="11" name="Result_69837" type="Result" version="opset1">
205
  <input>
206
  <port id="0" precision="STRING">
207
  <dim>-1</dim>
@@ -234,10 +234,30 @@
234
  <edge from-layer="10" from-port="3" to-layer="11" to-port="0" />
235
  </edges>
236
  <rt_info>
237
  <bos_token_id value="2" />
238
  <chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn>' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn>&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn>model&#10;'}}{% endif %}" />
239
  <eos_token_id value="1" />
240
  <original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'>" />
241
  <pad_token_id value="0" />
242
  </rt_info>
243
  </net>
 
1
  <?xml version="1.0"?>
2
  <net name="detokenizer" version="11">
3
  <layers>
4
+ <layer id="0" name="Parameter_49371" type="Parameter" version="opset1">
5
  <data shape="?,?" element_type="i64" />
6
  <output>
7
+ <port id="0" precision="I64" names="Parameter_49371">
8
  <dim>-1</dim>
9
  <dim>-1</dim>
10
  </port>
11
  </output>
12
  </layer>
13
+ <layer id="1" name="Convert_49388" type="Convert" version="opset1">
14
  <data destination_type="i32" />
15
  <input>
16
  <port id="0" precision="I64">
 
25
  </port>
26
  </output>
27
  </layer>
28
+ <layer id="2" name="Constant_49338" type="Const" version="opset1">
29
  <data element_type="u8" shape="2955910" offset="0" size="2955910" />
30
  <output>
31
  <port id="0" precision="U8">
 
33
  </port>
34
  </output>
35
  </layer>
36
+ <layer id="3" name="StringTensorUnpack_49339" type="StringTensorUnpack" version="extension">
37
  <data mode="begins_ends" />
38
  <input>
39
  <port id="0" precision="U8">
 
52
  </port>
53
  </output>
54
  </layer>
55
+ <layer id="4" name="VocabDecoder_49372" type="VocabDecoder" version="extension">
56
  <data skip_tokens="0, 1, 2, 3, 106, 107" />
57
  <input>
58
  <port id="0" precision="I32">
 
87
  </port>
88
  </output>
89
  </layer>
90
+ <layer id="5" name="Constant_49374" type="Const" version="opset1">
91
  <data element_type="u8" shape="3" offset="2955910" size="3" />
92
  <output>
93
  <port id="0" precision="U8">
 
95
  </port>
96
  </output>
97
  </layer>
98
+ <layer id="6" name="Constant_49376" type="Const" version="opset1">
99
  <data element_type="u8" shape="1" offset="2955913" size="1" />
100
  <output>
101
  <port id="0" precision="U8">
 
103
  </port>
104
  </output>
105
  </layer>
106
+ <layer id="7" name="RegexNormalization_49377" type="RegexNormalization" version="extension">
107
  <data global_replace="true" />
108
  <input>
109
  <port id="0" precision="I32">
 
134
  </port>
135
  </output>
136
  </layer>
137
+ <layer id="8" name="ByteFallback_49378" type="ByteFallback" version="extension">
138
  <input>
139
  <port id="0" precision="I32">
140
  <dim>-1</dim>
 
158
  </port>
159
  </output>
160
  </layer>
161
+ <layer id="9" name="FuzeRagged_49379" type="FuzeRagged" version="extension">
162
  <input>
163
  <port id="0" precision="I32">
164
  <dim>-1</dim>
 
182
  </port>
183
  </output>
184
  </layer>
185
+ <layer id="10" name="StringTensorPack_49380" type="StringTensorPack" version="extension">
186
  <data mode="begins_ends" />
187
  <input>
188
  <port id="0" precision="I32">
 
201
  </port>
202
  </output>
203
  </layer>
204
+ <layer id="11" name="Result_49381" type="Result" version="opset1">
205
  <input>
206
  <port id="0" precision="STRING">
207
  <dim>-1</dim>
 
234
  <edge from-layer="10" from-port="3" to-layer="11" to-port="0" />
235
  </edges>
236
  <rt_info>
237
+ <add_attention_mask value="True" />
238
+ <add_prefix_space />
239
+ <add_special_tokens value="True" />
240
  <bos_token_id value="2" />
241
  <chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn>' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn>&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn>model&#10;'}}{% endif %}" />
242
+ <clean_up_tokenization_spaces />
243
+ <detokenizer_input_type value="i64" />
244
  <eos_token_id value="1" />
245
+ <handle_special_tokens_with_re />
246
+ <number_of_inputs value="1" />
247
+ <openvino_tokenizers_version value="2024.5.0.0" />
248
+ <openvino_version value="2024.5.0" />
249
  <original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'>" />
250
  <pad_token_id value="0" />
251
+ <sentencepiece_version value="0.2.0" />
252
+ <skip_special_tokens value="True" />
253
+ <streaming_detokenizer value="False" />
254
+ <tiktoken_version value="0.8.0" />
255
+ <tokenizer_output_type value="i64" />
256
+ <tokenizers_version value="0.20.3" />
257
+ <transformers_version value="4.46.3" />
258
+ <use_max_padding value="False" />
259
+ <use_sentencepiece_backend value="False" />
260
+ <utf8_replace_mode />
261
+ <with_detokenizer value="True" />
262
  </rt_info>
263
  </net>
openvino_model.xml CHANGED
The diff for this file is too large to render. See raw diff
 
openvino_tokenizer.xml CHANGED
@@ -1,27 +1,27 @@
1
  <?xml version="1.0"?>
2
  <net name="tokenizer" version="11">
3
  <layers>
4
- <layer id="0" name="Parameter_69711" type="Parameter" version="opset1">
5
  <data shape="?" element_type="string" />
6
  <output>
7
- <port id="0" precision="STRING" names="Parameter_69711">
8
  <dim>-1</dim>
9
  </port>
10
  </output>
11
  </layer>
12
- <layer id="1" name="Constant_69811" type="Const" version="opset1">
13
  <data element_type="i32" shape="" offset="0" size="4" />
14
  <output>
15
  <port id="0" precision="I32" />
16
  </output>
17
  </layer>
18
- <layer id="2" name="Constant_69812" type="Const" version="opset1">
19
  <data element_type="i32" shape="" offset="4" size="4" />
20
  <output>
21
  <port id="0" precision="I32" />
22
  </output>
23
  </layer>
24
- <layer id="3" name="Constant_69813" type="Const" version="opset1">
25
  <data element_type="i32" shape="1" offset="8" size="4" />
26
  <output>
27
  <port id="0" precision="I32">
@@ -29,13 +29,13 @@
29
  </port>
30
  </output>
31
  </layer>
32
- <layer id="4" name="Constant_69717" type="Const" version="opset1">
33
  <data element_type="i64" shape="" offset="12" size="8" />
34
  <output>
35
  <port id="0" precision="I64" />
36
  </output>
37
  </layer>
38
- <layer id="5" name="StringTensorUnpack_69712" type="StringTensorUnpack" version="extension">
39
  <data mode="begins_ends" />
40
  <input>
41
  <port id="0" precision="STRING">
@@ -54,7 +54,7 @@
54
  </port>
55
  </output>
56
  </layer>
57
- <layer id="6" name="ShapeOf_69713" type="ShapeOf" version="opset3">
58
  <data output_type="i64" />
59
  <input>
60
  <port id="0" precision="I32">
@@ -67,19 +67,19 @@
67
  </port>
68
  </output>
69
  </layer>
70
- <layer id="7" name="Constant_69714" type="Const" version="opset1">
71
  <data element_type="i64" shape="" offset="12" size="8" />
72
  <output>
73
  <port id="0" precision="I64" />
74
  </output>
75
  </layer>
76
- <layer id="8" name="Constant_69715" type="Const" version="opset1">
77
  <data element_type="i64" shape="" offset="12" size="8" />
78
  <output>
79
  <port id="0" precision="I64" />
80
  </output>
81
  </layer>
82
- <layer id="9" name="Gather_69716" type="Gather" version="opset8">
83
  <data batch_dims="0" />
84
  <input>
85
  <port id="0" precision="I64">
@@ -92,13 +92,13 @@
92
  <port id="3" precision="I64" />
93
  </output>
94
  </layer>
95
- <layer id="10" name="Constant_69718" type="Const" version="opset1">
96
  <data element_type="i64" shape="" offset="20" size="8" />
97
  <output>
98
  <port id="0" precision="I64" />
99
  </output>
100
  </layer>
101
- <layer id="11" name="Range_69719" type="Range" version="opset4">
102
  <data output_type="i32" />
103
  <input>
104
  <port id="0" precision="I64" />
@@ -111,19 +111,19 @@
111
  </port>
112
  </output>
113
  </layer>
114
- <layer id="12" name="Constant_69720" type="Const" version="opset1">
115
  <data element_type="i64" shape="" offset="20" size="8" />
116
  <output>
117
  <port id="0" precision="I64" />
118
  </output>
119
  </layer>
120
- <layer id="13" name="Constant_69721" type="Const" version="opset1">
121
  <data element_type="i64" shape="" offset="20" size="8" />
122
  <output>
123
  <port id="0" precision="I64" />
124
  </output>
125
  </layer>
126
- <layer id="14" name="Add_69722" type="Add" version="opset1">
127
  <data auto_broadcast="numpy" />
128
  <input>
129
  <port id="0" precision="I64" />
@@ -133,13 +133,13 @@
133
  <port id="2" precision="I64" />
134
  </output>
135
  </layer>
136
- <layer id="15" name="Constant_69723" type="Const" version="opset1">
137
  <data element_type="i64" shape="" offset="20" size="8" />
138
  <output>
139
  <port id="0" precision="I64" />
140
  </output>
141
  </layer>
142
- <layer id="16" name="Range_69724" type="Range" version="opset4">
143
  <data output_type="i32" />
144
  <input>
145
  <port id="0" precision="I64" />
@@ -152,7 +152,7 @@
152
  </port>
153
  </output>
154
  </layer>
155
- <layer id="17" name="Constant_69786" type="Const" version="opset1">
156
  <data element_type="u8" shape="5282" offset="28" size="5282" />
157
  <output>
158
  <port id="0" precision="U8">
@@ -160,7 +160,7 @@
160
  </port>
161
  </output>
162
  </layer>
163
- <layer id="18" name="SpecialTokensSplit_69787" type="SpecialTokensSplit" version="extension">
164
  <input>
165
  <port id="0" precision="I32">
166
  <dim>-1</dim>
@@ -202,7 +202,7 @@
202
  </port>
203
  </output>
204
  </layer>
205
- <layer id="19" name="Constant_69789" type="Const" version="opset1">
206
  <data element_type="u8" shape="1" offset="5310" size="1" />
207
  <output>
208
  <port id="0" precision="U8">
@@ -210,7 +210,7 @@
210
  </port>
211
  </output>
212
  </layer>
213
- <layer id="20" name="Constant_69791" type="Const" version="opset1">
214
  <data element_type="u8" shape="3" offset="5311" size="3" />
215
  <output>
216
  <port id="0" precision="U8">
@@ -218,7 +218,7 @@
218
  </port>
219
  </output>
220
  </layer>
221
- <layer id="21" name="RegexNormalization_69792" type="RegexNormalization" version="extension">
222
  <data global_replace="true" />
223
  <input>
224
  <port id="0" precision="I32">
@@ -255,7 +255,7 @@
255
  </port>
256
  </output>
257
  </layer>
258
- <layer id="22" name="Constant_69794" type="Const" version="opset1">
259
  <data element_type="u8" shape="2955910" offset="5314" size="2955910" />
260
  <output>
261
  <port id="0" precision="U8">
@@ -263,7 +263,7 @@
263
  </port>
264
  </output>
265
  </layer>
266
- <layer id="23" name="StringTensorUnpack_69795" type="StringTensorUnpack" version="extension">
267
  <data mode="begins_ends" />
268
  <input>
269
  <port id="0" precision="U8">
@@ -282,7 +282,7 @@
282
  </port>
283
  </output>
284
  </layer>
285
- <layer id="24" name="Constant_69800" type="Const" version="opset1">
286
  <data element_type="u8" shape="5031736" offset="2961224" size="5031736" />
287
  <output>
288
  <port id="0" precision="U8">
@@ -290,7 +290,7 @@
290
  </port>
291
  </output>
292
  </layer>
293
- <layer id="25" name="StringTensorUnpack_69801" type="StringTensorUnpack" version="extension">
294
  <data mode="begins_ends" />
295
  <input>
296
  <port id="0" precision="U8">
@@ -309,7 +309,7 @@
309
  </port>
310
  </output>
311
  </layer>
312
- <layer id="26" name="Constant_69803" type="Const" version="opset1">
313
  <data element_type="u8" shape="4245743" offset="7992960" size="4245743" />
314
  <output>
315
  <port id="0" precision="U8">
@@ -317,7 +317,7 @@
317
  </port>
318
  </output>
319
  </layer>
320
- <layer id="27" name="StringTensorUnpack_69804" type="StringTensorUnpack" version="extension">
321
  <data mode="begins_ends" />
322
  <input>
323
  <port id="0" precision="U8">
@@ -336,7 +336,7 @@
336
  </port>
337
  </output>
338
  </layer>
339
- <layer id="28" name="Constant_69797" type="Const" version="opset1">
340
  <data element_type="u8" shape="4170" offset="12238703" size="4170" />
341
  <output>
342
  <port id="0" precision="U8">
@@ -344,7 +344,7 @@
344
  </port>
345
  </output>
346
  </layer>
347
- <layer id="29" name="StringTensorUnpack_69798" type="StringTensorUnpack" version="extension">
348
  <data mode="begins_ends" />
349
  <input>
350
  <port id="0" precision="U8">
@@ -363,7 +363,7 @@
363
  </port>
364
  </output>
365
  </layer>
366
- <layer id="30" name="Constant_69805" type="Const" version="opset1">
367
  <data element_type="i32" shape="216" offset="12242873" size="864" />
368
  <output>
369
  <port id="0" precision="I32">
@@ -371,7 +371,7 @@
371
  </port>
372
  </output>
373
  </layer>
374
- <layer id="31" name="BPETokenizer_69806" type="BPETokenizer" version="extension">
375
  <data unk_token="&lt;unk>" fuse_unk="true" suffix_indicator="" end_suffix="" byte_fallback="true" cache_capacity="51200" />
376
  <input>
377
  <port id="0" precision="I32">
@@ -441,7 +441,7 @@
441
  </port>
442
  </output>
443
  </layer>
444
- <layer id="32" name="Subtract_69807" type="Subtract" version="opset1">
445
  <data auto_broadcast="numpy" />
446
  <input>
447
  <port id="0" precision="I32">
@@ -457,13 +457,13 @@
457
  </port>
458
  </output>
459
  </layer>
460
- <layer id="33" name="Constant_69808" type="Const" version="opset1">
461
  <data element_type="i32" shape="" offset="12243737" size="4" />
462
  <output>
463
  <port id="0" precision="I32" />
464
  </output>
465
  </layer>
466
- <layer id="34" name="Minimum_69809" type="Minimum" version="opset1">
467
  <data auto_broadcast="numpy" />
468
  <input>
469
  <port id="0" precision="I32">
@@ -477,7 +477,7 @@
477
  </port>
478
  </output>
479
  </layer>
480
- <layer id="35" name="Subtract_69810" type="Subtract" version="opset1">
481
  <data auto_broadcast="numpy" />
482
  <input>
483
  <port id="0" precision="I32">
@@ -493,7 +493,7 @@
493
  </port>
494
  </output>
495
  </layer>
496
- <layer id="36" name="Constant_69814" type="Const" version="opset1">
497
  <data element_type="i32" shape="2" offset="12" size="8" />
498
  <output>
499
  <port id="0" precision="I32">
@@ -501,7 +501,7 @@
501
  </port>
502
  </output>
503
  </layer>
504
- <layer id="37" name="CombineSegments_69815" type="CombineSegments" version="extension">
505
  <input>
506
  <port id="0" precision="I32" />
507
  <port id="1" precision="I32" />
@@ -542,7 +542,7 @@
542
  </port>
543
  </output>
544
  </layer>
545
- <layer id="38" name="Subtract_69816" type="Subtract" version="opset1">
546
  <data auto_broadcast="numpy" />
547
  <input>
548
  <port id="0" precision="I32">
@@ -558,13 +558,13 @@
558
  </port>
559
  </output>
560
  </layer>
561
- <layer id="39" name="Constant_69817" type="Const" version="opset1">
562
  <data element_type="i32" shape="" offset="0" size="4" />
563
  <output>
564
  <port id="0" precision="I32" />
565
  </output>
566
  </layer>
567
- <layer id="40" name="ReduceMax_69818" type="ReduceMax" version="opset1">
568
  <data keep_dims="false" />
569
  <input>
570
  <port id="0" precision="I32">
@@ -576,13 +576,13 @@
576
  <port id="2" precision="I32" />
577
  </output>
578
  </layer>
579
- <layer id="41" name="Constant_69819" type="Const" version="opset1">
580
  <data element_type="i32" shape="" offset="0" size="4" />
581
  <output>
582
  <port id="0" precision="I32" />
583
  </output>
584
  </layer>
585
- <layer id="42" name="RaggedToDense_69820" type="RaggedToDense" version="extension">
586
  <data pad_right="false" />
587
  <input>
588
  <port id="0" precision="I32">
@@ -608,7 +608,7 @@
608
  </port>
609
  </output>
610
  </layer>
611
- <layer id="43" name="Convert_69821" type="Convert" version="opset1">
612
  <data destination_type="i32" />
613
  <input>
614
  <port id="0" precision="BOOL">
@@ -623,7 +623,7 @@
623
  </port>
624
  </output>
625
  </layer>
626
- <layer id="44" name="Convert_69821" type="Convert" version="opset1">
627
  <data destination_type="i64" />
628
  <input>
629
  <port id="0" precision="I32">
@@ -638,7 +638,7 @@
638
  </port>
639
  </output>
640
  </layer>
641
- <layer id="46" name="RaggedToDense_69820.0" type="Convert" version="opset1">
642
  <data destination_type="i64" />
643
  <input>
644
  <port id="0" precision="I32">
@@ -653,7 +653,7 @@
653
  </port>
654
  </output>
655
  </layer>
656
- <layer id="47" name="Result_69824" type="Result" version="opset1">
657
  <input>
658
  <port id="0" precision="I64">
659
  <dim>-1</dim>
@@ -661,7 +661,7 @@
661
  </port>
662
  </input>
663
  </layer>
664
- <layer id="45" name="Result_69826" type="Result" version="opset1">
665
  <input>
666
  <port id="0" precision="I64">
667
  <dim>-1</dim>
@@ -747,10 +747,30 @@
747
  <edge from-layer="46" from-port="1" to-layer="47" to-port="0" />
748
  </edges>
749
  <rt_info>
750
  <bos_token_id value="2" />
751
  <chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn>' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn>&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn>model&#10;'}}{% endif %}" />
752
  <eos_token_id value="1" />
753
  <original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'>" />
754
  <pad_token_id value="0" />
755
  </rt_info>
756
  </net>
 
1
  <?xml version="1.0"?>
2
  <net name="tokenizer" version="11">
3
  <layers>
4
+ <layer id="0" name="Parameter_49255" type="Parameter" version="opset1">
5
  <data shape="?" element_type="string" />
6
  <output>
7
+ <port id="0" precision="STRING" names="Parameter_49255">
8
  <dim>-1</dim>
9
  </port>
10
  </output>
11
  </layer>
12
+ <layer id="1" name="Constant_49355" type="Const" version="opset1">
13
  <data element_type="i32" shape="" offset="0" size="4" />
14
  <output>
15
  <port id="0" precision="I32" />
16
  </output>
17
  </layer>
18
+ <layer id="2" name="Constant_49356" type="Const" version="opset1">
19
  <data element_type="i32" shape="" offset="4" size="4" />
20
  <output>
21
  <port id="0" precision="I32" />
22
  </output>
23
  </layer>
24
+ <layer id="3" name="Constant_49357" type="Const" version="opset1">
25
  <data element_type="i32" shape="1" offset="8" size="4" />
26
  <output>
27
  <port id="0" precision="I32">
 
29
  </port>
30
  </output>
31
  </layer>
32
+ <layer id="4" name="Constant_49261" type="Const" version="opset1">
33
  <data element_type="i64" shape="" offset="12" size="8" />
34
  <output>
35
  <port id="0" precision="I64" />
36
  </output>
37
  </layer>
38
+ <layer id="5" name="StringTensorUnpack_49256" type="StringTensorUnpack" version="extension">
39
  <data mode="begins_ends" />
40
  <input>
41
  <port id="0" precision="STRING">
 
54
  </port>
55
  </output>
56
  </layer>
57
+ <layer id="6" name="ShapeOf_49257" type="ShapeOf" version="opset3">
58
  <data output_type="i64" />
59
  <input>
60
  <port id="0" precision="I32">
 
67
  </port>
68
  </output>
69
  </layer>
70
+ <layer id="7" name="Constant_49258" type="Const" version="opset1">
71
  <data element_type="i64" shape="" offset="12" size="8" />
72
  <output>
73
  <port id="0" precision="I64" />
74
  </output>
75
  </layer>
76
+ <layer id="8" name="Constant_49259" type="Const" version="opset1">
77
  <data element_type="i64" shape="" offset="12" size="8" />
78
  <output>
79
  <port id="0" precision="I64" />
80
  </output>
81
  </layer>
82
+ <layer id="9" name="Gather_49260" type="Gather" version="opset8">
83
  <data batch_dims="0" />
84
  <input>
85
  <port id="0" precision="I64">
 
92
  <port id="3" precision="I64" />
93
  </output>
94
  </layer>
95
+ <layer id="10" name="Constant_49262" type="Const" version="opset1">
96
  <data element_type="i64" shape="" offset="20" size="8" />
97
  <output>
98
  <port id="0" precision="I64" />
99
  </output>
100
  </layer>
101
+ <layer id="11" name="Range_49263" type="Range" version="opset4">
102
  <data output_type="i32" />
103
  <input>
104
  <port id="0" precision="I64" />
 
111
  </port>
112
  </output>
113
  </layer>
114
+ <layer id="12" name="Constant_49264" type="Const" version="opset1">
115
  <data element_type="i64" shape="" offset="20" size="8" />
116
  <output>
117
  <port id="0" precision="I64" />
118
  </output>
119
  </layer>
120
+ <layer id="13" name="Constant_49265" type="Const" version="opset1">
121
  <data element_type="i64" shape="" offset="20" size="8" />
122
  <output>
123
  <port id="0" precision="I64" />
124
  </output>
125
  </layer>
126
+ <layer id="14" name="Add_49266" type="Add" version="opset1">
127
  <data auto_broadcast="numpy" />
128
  <input>
129
  <port id="0" precision="I64" />
 
133
  <port id="2" precision="I64" />
134
  </output>
135
  </layer>
136
+ <layer id="15" name="Constant_49267" type="Const" version="opset1">
137
  <data element_type="i64" shape="" offset="20" size="8" />
138
  <output>
139
  <port id="0" precision="I64" />
140
  </output>
141
  </layer>
142
+ <layer id="16" name="Range_49268" type="Range" version="opset4">
143
  <data output_type="i32" />
144
  <input>
145
  <port id="0" precision="I64" />
 
152
  </port>
153
  </output>
154
  </layer>
155
+ <layer id="17" name="Constant_49330" type="Const" version="opset1">
156
  <data element_type="u8" shape="5282" offset="28" size="5282" />
157
  <output>
158
  <port id="0" precision="U8">
 
160
  </port>
161
  </output>
162
  </layer>
163
+ <layer id="18" name="SpecialTokensSplit_49331" type="SpecialTokensSplit" version="extension">
164
  <input>
165
  <port id="0" precision="I32">
166
  <dim>-1</dim>
 
202
  </port>
203
  </output>
204
  </layer>
205
+ <layer id="19" name="Constant_49333" type="Const" version="opset1">
206
  <data element_type="u8" shape="1" offset="5310" size="1" />
207
  <output>
208
  <port id="0" precision="U8">
 
210
  </port>
211
  </output>
212
  </layer>
213
+ <layer id="20" name="Constant_49335" type="Const" version="opset1">
214
  <data element_type="u8" shape="3" offset="5311" size="3" />
215
  <output>
216
  <port id="0" precision="U8">
 
218
  </port>
219
  </output>
220
  </layer>
221
+ <layer id="21" name="RegexNormalization_49336" type="RegexNormalization" version="extension">
222
  <data global_replace="true" />
223
  <input>
224
  <port id="0" precision="I32">
 
255
  </port>
256
  </output>
257
  </layer>
258
+ <layer id="22" name="Constant_49338" type="Const" version="opset1">
259
  <data element_type="u8" shape="2955910" offset="5314" size="2955910" />
260
  <output>
261
  <port id="0" precision="U8">
 
263
  </port>
264
  </output>
265
  </layer>
266
+ <layer id="23" name="StringTensorUnpack_49339" type="StringTensorUnpack" version="extension">
267
  <data mode="begins_ends" />
268
  <input>
269
  <port id="0" precision="U8">
 
282
  </port>
283
  </output>
284
  </layer>
285
+ <layer id="24" name="Constant_49344" type="Const" version="opset1">
286
  <data element_type="u8" shape="5031736" offset="2961224" size="5031736" />
287
  <output>
288
  <port id="0" precision="U8">
 
290
  </port>
291
  </output>
292
  </layer>
293
+ <layer id="25" name="StringTensorUnpack_49345" type="StringTensorUnpack" version="extension">
294
  <data mode="begins_ends" />
295
  <input>
296
  <port id="0" precision="U8">
 
309
  </port>
310
  </output>
311
  </layer>
312
+ <layer id="26" name="Constant_49347" type="Const" version="opset1">
313
  <data element_type="u8" shape="4245743" offset="7992960" size="4245743" />
314
  <output>
315
  <port id="0" precision="U8">
 
317
  </port>
318
  </output>
319
  </layer>
320
+ <layer id="27" name="StringTensorUnpack_49348" type="StringTensorUnpack" version="extension">
321
  <data mode="begins_ends" />
322
  <input>
323
  <port id="0" precision="U8">
 
336
  </port>
337
  </output>
338
  </layer>
339
+ <layer id="28" name="Constant_49341" type="Const" version="opset1">
340
  <data element_type="u8" shape="4170" offset="12238703" size="4170" />
341
  <output>
342
  <port id="0" precision="U8">
 
344
  </port>
345
  </output>
346
  </layer>
347
+ <layer id="29" name="StringTensorUnpack_49342" type="StringTensorUnpack" version="extension">
348
  <data mode="begins_ends" />
349
  <input>
350
  <port id="0" precision="U8">
 
363
  </port>
364
  </output>
365
  </layer>
366
+ <layer id="30" name="Constant_49349" type="Const" version="opset1">
367
  <data element_type="i32" shape="216" offset="12242873" size="864" />
368
  <output>
369
  <port id="0" precision="I32">
 
371
  </port>
372
  </output>
373
  </layer>
374
+ <layer id="31" name="BPETokenizer_49350" type="BPETokenizer" version="extension">
375
  <data unk_token="&lt;unk>" fuse_unk="true" suffix_indicator="" end_suffix="" byte_fallback="true" cache_capacity="51200" />
376
  <input>
377
  <port id="0" precision="I32">
 
441
  </port>
442
  </output>
443
  </layer>
444
+ <layer id="32" name="Subtract_49351" type="Subtract" version="opset1">
445
  <data auto_broadcast="numpy" />
446
  <input>
447
  <port id="0" precision="I32">
 
457
  </port>
458
  </output>
459
  </layer>
460
+ <layer id="33" name="Constant_49352" type="Const" version="opset1">
461
  <data element_type="i32" shape="" offset="12243737" size="4" />
462
  <output>
463
  <port id="0" precision="I32" />
464
  </output>
465
  </layer>
466
+ <layer id="34" name="Minimum_49353" type="Minimum" version="opset1">
467
  <data auto_broadcast="numpy" />
468
  <input>
469
  <port id="0" precision="I32">
 
477
  </port>
478
  </output>
479
  </layer>
480
+ <layer id="35" name="Subtract_49354" type="Subtract" version="opset1">
481
  <data auto_broadcast="numpy" />
482
  <input>
483
  <port id="0" precision="I32">
 
493
  </port>
494
  </output>
495
  </layer>
496
+ <layer id="36" name="Constant_49358" type="Const" version="opset1">
497
  <data element_type="i32" shape="2" offset="12" size="8" />
498
  <output>
499
  <port id="0" precision="I32">
 
501
  </port>
502
  </output>
503
  </layer>
504
+ <layer id="37" name="CombineSegments_49359" type="CombineSegments" version="extension">
505
  <input>
506
  <port id="0" precision="I32" />
507
  <port id="1" precision="I32" />
 
542
  </port>
543
  </output>
544
  </layer>
545
+ <layer id="38" name="Subtract_49360" type="Subtract" version="opset1">
546
  <data auto_broadcast="numpy" />
547
  <input>
548
  <port id="0" precision="I32">
 
558
  </port>
559
  </output>
560
  </layer>
561
+ <layer id="39" name="Constant_49361" type="Const" version="opset1">
562
  <data element_type="i32" shape="" offset="0" size="4" />
563
  <output>
564
  <port id="0" precision="I32" />
565
  </output>
566
  </layer>
567
+ <layer id="40" name="ReduceMax_49362" type="ReduceMax" version="opset1">
568
  <data keep_dims="false" />
569
  <input>
570
  <port id="0" precision="I32">
 
576
  <port id="2" precision="I32" />
577
  </output>
578
  </layer>
579
+ <layer id="41" name="Constant_49363" type="Const" version="opset1">
580
  <data element_type="i32" shape="" offset="0" size="4" />
581
  <output>
582
  <port id="0" precision="I32" />
583
  </output>
584
  </layer>
585
+ <layer id="42" name="RaggedToDense_49364" type="RaggedToDense" version="extension">
586
  <data pad_right="false" />
587
  <input>
588
  <port id="0" precision="I32">
 
608
  </port>
609
  </output>
610
  </layer>
611
+ <layer id="43" name="Convert_49365" type="Convert" version="opset1">
612
  <data destination_type="i32" />
613
  <input>
614
  <port id="0" precision="BOOL">
 
623
  </port>
624
  </output>
625
  </layer>
626
+ <layer id="44" name="Convert_49365" type="Convert" version="opset1">
627
  <data destination_type="i64" />
628
  <input>
629
  <port id="0" precision="I32">
 
638
  </port>
639
  </output>
640
  </layer>
641
+ <layer id="46" name="RaggedToDense_49364.0" type="Convert" version="opset1">
642
  <data destination_type="i64" />
643
  <input>
644
  <port id="0" precision="I32">
 
653
  </port>
654
  </output>
655
  </layer>
656
+ <layer id="47" name="Result_49368" type="Result" version="opset1">
657
  <input>
658
  <port id="0" precision="I64">
659
  <dim>-1</dim>
 
661
  </port>
662
  </input>
663
  </layer>
664
+ <layer id="45" name="Result_49370" type="Result" version="opset1">
665
  <input>
666
  <port id="0" precision="I64">
667
  <dim>-1</dim>
 
747
  <edge from-layer="46" from-port="1" to-layer="47" to-port="0" />
748
  </edges>
749
  <rt_info>
750
+ <add_attention_mask value="True" />
751
+ <add_prefix_space />
752
+ <add_special_tokens value="True" />
753
  <bos_token_id value="2" />
754
  <chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn>' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn>&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn>model&#10;'}}{% endif %}" />
755
+ <clean_up_tokenization_spaces />
756
+ <detokenizer_input_type value="i64" />
757
  <eos_token_id value="1" />
758
+ <handle_special_tokens_with_re />
759
+ <number_of_inputs value="1" />
760
+ <openvino_tokenizers_version value="2024.5.0.0" />
761
+ <openvino_version value="2024.5.0" />
762
  <original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'>" />
763
  <pad_token_id value="0" />
764
+ <sentencepiece_version value="0.2.0" />
765
+ <skip_special_tokens value="True" />
766
+ <streaming_detokenizer value="False" />
767
+ <tiktoken_version value="0.8.0" />
768
+ <tokenizer_output_type value="i64" />
769
+ <tokenizers_version value="0.20.3" />
770
+ <transformers_version value="4.46.3" />
771
+ <use_max_padding value="False" />
772
+ <use_sentencepiece_backend value="False" />
773
+ <utf8_replace_mode />
774
+ <with_detokenizer value="True" />
775
  </rt_info>
776
  </net>