gemma-2b-it-fp16-ov / openvino_detokenizer.xml
<?xml version="1.0"?>
<net name="detokenizer" version="11">
<layers>
<layer id="0" name="Parameter_69827" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="Parameter_69827">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="1" name="Convert_69844" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="2" name="Constant_69794" type="Const" version="opset1">
<data element_type="u8" shape="2955910" offset="0" size="2955910" />
<output>
<port id="0" precision="U8">
<dim>2955910</dim>
</port>
</output>
</layer>
<layer id="3" name="StringTensorUnpack_69795" type="StringTensorUnpack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="U8">
<dim>2955910</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="4" name="VocabDecoder_69828" type="VocabDecoder" version="extension">
<data skip_tokens="0, 1, 2, 3, 106, 107" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="I32">
<dim>-1</dim>
</port>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
<port id="6" precision="I32">
<dim>-1</dim>
</port>
<port id="7" precision="I32">
<dim>-1</dim>
</port>
<port id="8" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="5" name="Constant_69830" type="Const" version="opset1">
<data element_type="u8" shape="3" offset="2955910" size="3" />
<output>
<port id="0" precision="U8">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="6" name="Constant_69832" type="Const" version="opset1">
<data element_type="u8" shape="1" offset="2955913" size="1" />
<output>
<port id="0" precision="U8">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="7" name="RegexNormalization_69833" type="RegexNormalization" version="extension">
<data global_replace="true" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
<port id="3" precision="U8">
<dim>3</dim>
</port>
<port id="4" precision="U8">
<dim>1</dim>
</port>
</input>
<output>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
<port id="6" precision="I32">
<dim>-1</dim>
</port>
<port id="7" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="8" name="ByteFallback_69834" type="ByteFallback" version="extension">
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="I32">
<dim>-1</dim>
</port>
<port id="4" precision="I32">
<dim>-1</dim>
</port>
<port id="5" precision="U8">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="9" name="FuzeRagged_69835" type="FuzeRagged" version="extension">
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="I32">
<dim>-1</dim>
</port>
<port id="3" precision="I32">
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="I32">
<dim>-1</dim>
</port>
<port id="5" precision="I32">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="10" name="StringTensorPack_69836" type="StringTensorPack" version="extension">
<data mode="begins_ends" />
<input>
<port id="0" precision="I32">
<dim>-1</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
</port>
<port id="2" precision="U8">
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="STRING" names="string_output">
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="11" name="Result_69837" type="Result" version="opset1">
<input>
<port id="0" precision="STRING">
<dim>-1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="0" />
<edge from-layer="1" from-port="1" to-layer="4" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="3" to-port="0" />
<edge from-layer="3" from-port="1" to-layer="4" to-port="1" />
<edge from-layer="3" from-port="2" to-layer="4" to-port="2" />
<edge from-layer="3" from-port="3" to-layer="4" to-port="3" />
<edge from-layer="4" from-port="6" to-layer="7" to-port="0" />
<edge from-layer="4" from-port="7" to-layer="7" to-port="1" />
<edge from-layer="4" from-port="8" to-layer="7" to-port="2" />
<edge from-layer="4" from-port="5" to-layer="9" to-port="1" />
<edge from-layer="4" from-port="4" to-layer="9" to-port="0" />
<edge from-layer="5" from-port="0" to-layer="7" to-port="3" />
<edge from-layer="6" from-port="0" to-layer="7" to-port="4" />
<edge from-layer="7" from-port="5" to-layer="8" to-port="0" />
<edge from-layer="7" from-port="7" to-layer="8" to-port="2" />
<edge from-layer="7" from-port="6" to-layer="8" to-port="1" />
<edge from-layer="8" from-port="3" to-layer="9" to-port="2" />
<edge from-layer="8" from-port="4" to-layer="9" to-port="3" />
<edge from-layer="8" from-port="5" to-layer="10" to-port="2" />
<edge from-layer="9" from-port="4" to-layer="10" to-port="0" />
<edge from-layer="9" from-port="5" to-layer="10" to-port="1" />
<edge from-layer="10" from-port="3" to-layer="11" to-port="0" />
</edges>
<rt_info>
<bos_token_id value="2" />
<chat_template value="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '&lt;start_of_turn>' + role + '&#10;' + message['content'] | trim + '&lt;end_of_turn>&#10;' }}{% endfor %}{% if add_generation_prompt %}{{'&lt;start_of_turn>model&#10;'}}{% endif %}" />
<eos_token_id value="1" />
<original_tokenizer_class value="&lt;class 'transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast'>" />
<pad_token_id value="0" />
</rt_info>
</net>