leonardlin winglian committed on
Commit 7c2bf30
1 Parent(s): 22ae21a

Fix llama3 chat_template (extra <|eot_id|> on last turn) (#1635)


* Fix llama3 chat_template (the trailing {{ eos_token }} caused an extra <|eot_id|> to be appended to the last turn). Output now matches the official Llama 3 Instruct model

* add tests

* chore: lint

---------

Co-authored-by: Wing Lian <wing.lian@gmail.com>

src/axolotl/utils/chat_templates.py CHANGED
@@ -24,7 +24,7 @@ def chat_templates(user_choice: str):
24
  "chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
25
  "gemma": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
26
  "cohere": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}",
27
- "llama3": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}",
28
  }
29
 
30
  if user_choice in templates:
 
24
  "chatml": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
25
  "gemma": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
26
  "cohere": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}",
27
+ "llama3": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
28
  }
29
 
30
  if user_choice in templates:
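
The fix above only drops the trailing "{% else %}{{ eos_token }}" branch. To see why that branch doubled the terminator, both tails can be rendered with plain jinja2 — a minimal sketch, not part of the commit, with the Llama 3 special-token strings supplied by hand and the template body condensed slightly:

from jinja2 import Template

# Shared body of the llama3 template (condensed from the diff above).
BODY = (
    "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}"
    "{% for message in messages %}"
    "{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}"
    "{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}"
    "{{ content }}{% endfor %}"
)
OLD_TAIL = "{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% else %}{{ eos_token }}{% endif %}"
NEW_TAIL = "{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"

messages = [{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hi"}]
# Token strings assumed from the Llama 3 tokenizer config.
ctx = {"messages": messages, "bos_token": "<|begin_of_text|>", "eos_token": "<|eot_id|>"}

print(Template(BODY + OLD_TAIL).render(**ctx))  # last turn ends ...hi<|eot_id|><|eot_id|>
print(Template(BODY + NEW_TAIL).render(**ctx))  # last turn ends ...hi<|eot_id|>

With add_generation_prompt left unset (the training-time case), the old else-branch always fires, which is exactly the extra <|eot_id|> the commit message describes.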
tests/prompt_strategies/test_chat_templates.py ADDED
@@ -0,0 +1,85 @@
+"""
+tests for chat_template prompt strategy
+"""
+import unittest
+
+import pytest
+from datasets import Dataset
+from transformers import AutoTokenizer
+
+from axolotl.prompt_strategies.chat_template import (
+    ChatTemplatePrompter,
+    ChatTemplateStrategy,
+)
+from axolotl.utils.chat_templates import chat_templates
+
+
+@pytest.fixture(name="sharegpt_dataset")
+def fixture_sharegpt_dataset():
+    # pylint: disable=duplicate-code
+    return Dataset.from_list(
+        [
+            {
+                "conversations": [
+                    {
+                        "from": "human",
+                        "value": "hello",
+                    },
+                    {
+                        "from": "gpt",
+                        "value": "hello",
+                    },
+                    {
+                        "from": "human",
+                        "value": "goodbye",
+                    },
+                    {
+                        "from": "gpt",
+                        "value": "goodbye",
+                    },
+                ]
+            }
+        ]
+    )
+
+
+@pytest.fixture(name="llama3_tokenizer")
+def fixture_llama3_tokenizer():
+    tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B")
+    tokenizer.eos_token = "<|eot_id|>"
+
+    return tokenizer
+
+
+class TestSharegptChatTemplateLlama3:
+    """
+    Test class for ShareGPT style datasets with llama-3 prompts using the chat_template strategy.
+    """
+
+    def test_llama3(self, llama3_tokenizer, sharegpt_dataset):
+        # pylint: disable=duplicate-code
+        strategy = ChatTemplateStrategy(
+            ChatTemplatePrompter(llama3_tokenizer, chat_templates("llama3")),
+            llama3_tokenizer,
+            False,
+            512,
+        )
+        res = strategy.tokenize_prompt(sharegpt_dataset[0])
+        input_ids = res["input_ids"]
+        # fmt: off
+        assert input_ids == [
+            128000,  # bos
+            128006, 882, 128007,  # user header
+            271, 15339, 128009,  # user prompt eot
+            128006, 78191, 128007,  # assistant header
+            271, 15339, 128009,  # assistant response eot
+            128006, 882, 128007,
+            271, 19045, 29474, 128009,
+            128006, 78191, 128007,
+            271, 19045, 29474, 128009,
+        ]
+        # fmt: on
+
+
+if __name__ == "__main__":
+    unittest.main()
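
As a sanity check beyond the unit test (not part of the commit), the asserted ids can be compared with what the chat template shipped in an Instruct tokenizer produces for the same conversation; the model id below is an assumption:

from transformers import AutoTokenizer

# Hypothetical cross-check: load a tokenizer that ships the official
# Llama 3 Instruct chat template (model id is an assumption).
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
messages = [
    {"role": "user", "content": "hello"},
    {"role": "assistant", "content": "hello"},
    {"role": "user", "content": "goodbye"},
    {"role": "assistant", "content": "goodbye"},
]
# tokenize=True is the default, so this returns token ids directly;
# add_generation_prompt=False mirrors the training-time setting in the test.
ids = tokenizer.apply_chat_template(messages, add_generation_prompt=False)
print(ids)  # expected to equal the input_ids asserted in test_llama3 above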