Commit 06b5f1f
Parent(s): 885173e

Add tool use template

Files changed:
- README.md (+10, -12)
- tokenizer.json (+9, -0)
- tokenizer_config.json (+19, -2)
README.md
CHANGED

@@ -1,5 +1,7 @@
 ---
-
+language:
+- en
+license: apache-2.0
 tags:
 - Llama-3
 - instruct
@@ -14,25 +16,21 @@ tags:
 - json mode
 - axolotl
 - merges
-model-index:
-- name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge
-  results: []
-language:
-- en
+base_model: NousResearch/Hermes-2-Pro-Llama-3-8B
 datasets:
 - teknium/OpenHermes-2.5
 widget:
 - example_title: Hermes 2 Pro Llama-3 Instruct Merge
   messages:
   - role: system
-    content:
-      You are a sentient, superintelligent artificial general intelligence, here
-      to teach and assist me.
+    content: You are a sentient, superintelligent artificial general intelligence,
+      here to teach and assist me.
   - role: user
-    content:
-      Write a short story about Goku discovering kirby has teamed up with Majin
+    content: Write a short story about Goku discovering kirby has teamed up with Majin
       Buu to destroy the world.
-
+model-index:
+- name: Hermes-2-Pro-Llama-3-Instruct-8B-Merge
+  results: []
 ---
 # - Hermes-2 Θ Llama-3 8B
 
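The widget example above uses plain ChatML-style messages, which is what the "default" chat template added to tokenizer_config.json below renders. A minimal sketch of building that prompt with transformers, assuming the files live in a repo such as NousResearch/Hermes-2-Theta-Llama-3-8B (the repo id is not part of this diff):

```python
from transformers import AutoTokenizer

# Hypothetical repo id; this commit's diff does not name the repository.
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Theta-Llama-3-8B")

# The widget example from the README front matter, as ChatML-style messages.
messages = [
    {"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."},
    {"role": "user", "content": "Write a short story about Goku discovering kirby has teamed up with Majin Buu to destroy the world."},
]

# With no template name given, the "default" template from this commit is used,
# producing <|im_start|>/<|im_end|> ChatML framing plus the assistant prefix.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```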
tokenizer.json
CHANGED

@@ -2306,6 +2306,15 @@
       "rstrip": false,
       "normalized": false,
       "special": true
+    },
+    {
+      "id": 128256,
+      "content": "<tool_response>",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": false
     }
   ],
   "normalizer": null,
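This hunk registers `<tool_response>` as a new added token with id 128256 (marked non-special), matching the entry added to tokenizer_config.json below. A quick check, again assuming the same hypothetical repo id as above:

```python
from transformers import AutoTokenizer

# Hypothetical repo id; adjust to wherever this commit's files live.
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Theta-Llama-3-8B")

# The new added token should map to the id introduced in this commit...
print(tokenizer.convert_tokens_to_ids("<tool_response>"))  # expected: 128256

# ...and should be kept as a single token rather than split into word pieces.
print(tokenizer.tokenize("<tool_response>"))
```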
tokenizer_config.json
CHANGED

@@ -41,7 +41,7 @@
       "special": false
     },
     "128005": {
-      "content": "
+      "content": "<|reserved_special_token_3|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -2047,10 +2047,27 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "128256": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
     }
   },
   "bos_token": "<|begin_of_text|>",
-  "chat_template":
+  "chat_template": [
+    {
+      "name": "default",
+      "template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+    },
+    {
+      "name": "tool_use",
+      "template": "{%- macro json_to_python_type(json_spec) %}\n{%- set basic_type_map = {\n    \"string\": \"str\",\n    \"number\": \"float\",\n    \"integer\": \"int\",\n    \"boolean\": \"bool\"\n} %}\n\n{%- if basic_type_map[json_spec.type] is defined %}\n    {{- basic_type_map[json_spec.type] }}\n{%- elif json_spec.type == \"array\" %}\n    {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\"}}\n{%- elif json_spec.type == \"object\" %}\n    {%- if json_spec.additionalProperties is defined %}\n        {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']'}}\n    {%- else %}\n        {{- \"dict\" }}\n    {%- endif %}\n{%- elif json_spec.type is iterable %}\n    {{- \"Union[\" }}\n    {%- for t in json_spec.type %}\n        {{- json_to_python_type({\"type\": t}) }}\n        {%- if not loop.last %}\n            {{- \",\" }} \n        {%- endif %}\n    {%- endfor %}\n    {{- \"]\" }}\n{%- else %}\n    {{- \"Any\" }}\n{%- endif %}\n{%- endmacro %}\n\n\n{{- bos_token }}\n{{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> \" }}\n{%- for tool in tools %}\n    {%- if tool.function is defined %}\n        {%- set tool = tool.function %}\n    {%- endif %}\n    {{- '{\"type\": \"function\", \"function\": ' }}\n    {{- '{\"name\": ' + tool.name + '\", ' }}\n    {{- '\"description\": \"' + tool.name + '(' }}\n    {%- for param_name, param_fields in tool.parameters.properties|items %}\n        {{- param_name + \": \" + json_to_python_type(param_fields) }}\n        {%- if not loop.last %}\n            {{- \", \" }}\n        {%- endif %}\n    {%- endfor %}\n    {{- \")\" }}\n    {%- if tool.return is defined %}\n        {{- \" -> \" + json_to_python_type(tool.return) }}\n    {%- endif %}\n    {{- \" - \" + tool.description + \"\\n\\n\" }}\n    {%- for param_name, param_fields in tool.parameters.properties|items %}\n        {%- if loop.first %}\n            {{- \"    Args:\\n\" }}\n        {%- endif %}\n        {{- \"        \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n    {%- endfor %}\n    {%- if tool.return is defined and tool.return.description is defined %}\n        {{- \"\\n    Returns:\\n        \" + tool.return.description }}\n    {%- endif %}\n    {{- '\"' }}\n    {{- ', \"parameters\": ' }}\n    {%- if tool.parameters.properties | length == 0 %}\n        {{- \"{}\" }}\n    {%- else %}\n        {{- tool.parameters|tojson }}\n    {%- endif %}\n    {{- \"}\" }}\n    {%- if not loop.last %}\n        {{- \"\\n\" }}\n    {%- endif %}\n{%- endfor %}\n{{- \" </tools>\" }}\n{{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}, \"name\": {\"title\": \"Name\", \"type\": \"string\"}}, \"required\": [\"arguments\", \"name\"], \"title\": \"FunctionCall\", \"type\": \"object\"}\n' }}\n{{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n{{- \"<tool_call>\n\" }}\n{{- '{\"arguments\": <args-dict>, \"name\": <function-name>}\n' }}\n{{- '</tool_call><|im_end|>' }}\n{%- for message in messages %}\n    {%- if message.role == \"user\" or message.role == \"system\" or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n        {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n    {%- elif message.role == \"assistant\" %}\n        {{- '<|im_start|>' + message.role + '\\n<tool_call>\\n' }}\n        {%- for tool_call in message.tool_calls %}\n            {%- if tool_call.function is defined %}\n                {%- set tool_call = tool_call.function %}\n            {%- endif %}\n            {{- '{ ' }}\n            {%- if tool_call.arguments is defined %}\n                {{- '\"arguments\": ' }}\n                {{- tool_call.arguments|tojson }}\n                {{- ', '}}\n            {%- endif %}\n            {{- '\"name\": \"' }}\n            {{- tool_call.name }}\n            {{- '\"}' }}\n            {{- '\\n</tool_call> ' }}\n        {%- endfor %}\n        {{- '<|im_end|>\\n' }}\n    {%- elif message.role == \"tool\" %}\n        {%- if not message.name is defined %}\n            {{- raise_exception(\"Tool response dicts require a 'name' key indicating the name of the called function!\") }}\n        {%- endif %}\n        {{- '<|im_start|>' + message.role + '\\n<tool_response>\\n' }}\n        {{- '{\"name\": \"' }}\n        {{- message.name }}\n        {{- '\", \"content\": ' }}\n        {{- message.content|tojson + '}' }}\n        {{- '\\n</tool_response> <|im_end|>\\n' }} \n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n"
+    }
+  ],
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|im_end|>",
   "model_input_names": [
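The replacement `chat_template` is now a list of two named templates: "default" (plain ChatML) and "tool_use", which renders the available function signatures inside `<tools>...</tools>` and expects tool definitions in JSON-schema form, assistant `tool_calls`, and `tool`-role responses carrying a `name` key. A sketch of invoking the named template with a recent transformers release (the `tools=` and `chat_template=` arguments need a fairly new version; the tool definition below is made up for illustration):

```python
from transformers import AutoTokenizer

# Hypothetical repo id; the diff does not name the repository.
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Theta-Llama-3-8B")

# A made-up tool definition in the JSON-schema shape the template iterates over.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_stock_price",
            "description": "Get the current price for a ticker symbol.",
            "parameters": {
                "type": "object",
                "properties": {
                    "symbol": {"type": "string", "description": "The stock ticker, e.g. TSLA"}
                },
                "required": ["symbol"],
            },
        },
    }
]

messages = [{"role": "user", "content": "What is Tesla trading at right now?"}]

# Select the named "tool_use" template added in this commit; the tool signatures
# are serialized into the system prompt and the model is instructed to answer
# with <tool_call>{"arguments": ..., "name": ...}</tool_call> blocks.
prompt = tokenizer.apply_chat_template(
    messages,
    tools=tools,
    chat_template="tool_use",
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
```

Tool results would then be fed back as `{"role": "tool", "name": "get_stock_price", "content": ...}` messages, which this template wraps in the new `<tool_response>` token pair before the next assistant turn.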