mjbuehler committed on
Commit 1553463
1 Parent(s): ad5ea1c

Upload processor

added_tokens.json ADDED
@@ -0,0 +1,5 @@
+{
+  "<end_of_utterance>": 32002,
+  "<fake_token_around_image>": 32000,
+  "<image>": 32001
+}
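
These three entries append the special tokens Idefics2 uses for image slots and turn boundaries after the base vocabulary. A minimal sketch of confirming the mapping after loading the tokenizer; the repo id "mjbuehler/Cephalo-Idefics2" is a hypothetical placeholder for wherever this processor is hosted:

from transformers import AutoTokenizer

# Hypothetical repo id -- substitute the repository this commit belongs to.
tokenizer = AutoTokenizer.from_pretrained("mjbuehler/Cephalo-Idefics2")

# Each ID should match the mapping in added_tokens.json above.
for token, expected_id in [
    ("<fake_token_around_image>", 32000),
    ("<image>", 32001),
    ("<end_of_utterance>", 32002),
]:
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token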
special_tokens_map.json CHANGED
@@ -1,26 +1,8 @@
 {
   "additional_special_tokens": [
-    {
-      "content": "<fake_token_around_image>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<image>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    },
-    {
-      "content": "<end_of_utterance>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false
-    }
+    "<fake_token_around_image>",
+    "<image>",
+    "<end_of_utterance>"
   ],
   "bos_token": {
     "content": "<s>",
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
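
tokenizer.model is stored through Git LFS, so the repository only tracks this pointer file: the oid is the SHA-256 digest of the actual model file and size is its length in bytes. A minimal sketch for verifying a downloaded copy against the pointer; the local path is an assumption:

import hashlib
from pathlib import Path

# Hypothetical local path -- point it at your checkout of the repository.
path = Path("./tokenizer.model")
data = path.read_bytes()

# Both expected values come from the LFS pointer above.
assert len(data) == 493443
expected = "dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055"
assert hashlib.sha256(data).hexdigest() == expected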
tokenizer_config.json CHANGED
@@ -59,9 +59,8 @@
   "bos_token": "<s>",
   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}",
   "clean_up_tokenization_spaces": false,
-  "do_image_splitting": true,
   "eos_token": "</s>",
-  "legacy": true,
+  "legacy": false,
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<unk>",
   "processor_class": "Idefics2Processor",