import gradio as gr
from transformers import AutoTokenizer
import json
import os
from huggingface_hub import login

# Optional default token read from the environment; a token entered in the
# UI takes precedence over it.
HUGGINGFACEHUB_API_TOKEN = os.environ.get("HF_TOKEN")

demo_conversation = """[
    {"role": "system", "content": "You are a helpful chatbot."},
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello, human!"},
    {"role": "user", "content": "Can I ask a question?"}
]"""

description_text = """# Chat Template Viewer
### This space helps you explore and debug [Chat Templates](https://huggingface.co/docs/transformers/main/en/chat_templating).
"""

# A sample tool definition, in the JSON-schema format that
# `tokenizer.apply_chat_template(..., tools=...)` expects.
default_tools = [{
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "description": "Get the current weather",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "format": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "The temperature unit to use. Infer this from the user's location.",
                },
            },
            "required": ["location", "format"],
        },
    },
}]

# List the named templates a tokenizer ships with. Some models define several
# (chat_template is then a dict, e.g. with "default" and "tool_use" keys);
# most define a single unnamed template, in which case we return [].
def get_template_names(model_name):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        if isinstance(tokenizer.chat_template, dict):
            return list(tokenizer.chat_template.keys())
        else:
            return []
    except Exception:
        return []

def update_template_dropdown(model_name):
    # Refresh the dropdown whenever the model name changes; an empty choice
    # list clears it for models with a single unnamed template.
    template_names = get_template_names(model_name)
    return gr.update(choices=template_names, value=None)

def apply_chat_template(model_name, test_conversation, add_generation_prompt, cleanup_whitespace, template_name, hf_token, kwargs):
    # Note: cleanup_whitespace and kwargs are accepted from the UI but are not
    # currently used when rendering the template.
    try:
        # Prefer the token entered in the UI, falling back to the environment.
        token = hf_token or HUGGINGFACEHUB_API_TOKEN
        if token:
            login(token=token)
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception:
        return f"Model {model_name} could not be loaded, or the HF token is invalid."
    try:
        conversation = json.loads(test_conversation)
        # A named template can only be looked up when chat_template is a dict.
        template = None
        if template_name and isinstance(tokenizer.chat_template, dict):
            template = tokenizer.chat_template.get(template_name)
        return tokenizer.apply_chat_template(
            conversation,
            chat_template=template,
            tokenize=False,
            add_generation_prompt=add_generation_prompt,
            tools=default_tools,
        )
    except Exception as e:
        return str(e)

with gr.Blocks() as demo:
    model_name_input = gr.Textbox(label="Model Name", placeholder="Enter model name")
    template_dropdown = gr.Dropdown(label="Template Name", choices=[], interactive=True)
    conversation_input = gr.TextArea(value=demo_conversation, lines=6, label="Conversation")
    add_generation_prompt_checkbox = gr.Checkbox(value=False, label="Add generation prompt")
    cleanup_whitespace_checkbox = gr.Checkbox(value=True, label="Cleanup template whitespace")
    hf_token_input = gr.Textbox(label="Hugging Face Token (optional)", placeholder="Enter your HF token")
    kwargs_input = gr.JSON(label="Additional kwargs", value=default_tools, render=False)
    output = gr.TextArea(label="Formatted conversation")

    # Repopulate the template dropdown whenever the model name changes.
    model_name_input.change(fn=update_template_dropdown, inputs=model_name_input, outputs=template_dropdown)
    gr.Interface(
        description=description_text,
        fn=apply_chat_template,
        inputs=[
            model_name_input,
            conversation_input,
            add_generation_prompt_checkbox,
            cleanup_whitespace_checkbox,
            template_dropdown,
            hf_token_input,
            kwargs_input
        ],
        outputs=output
    )

demo.launch()
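
# A minimal smoke test, assuming network access; the model name below is just
# an example, not a requirement. Calling the handler directly should return
# the same rendered prompt string that the UI displays:
#
# print(apply_chat_template(
#     "HuggingFaceH4/zephyr-7b-beta", demo_conversation,
#     add_generation_prompt=True, cleanup_whitespace=True,
#     template_name=None, hf_token=None, kwargs=None,
# ))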