Spaces:
Sleeping
Sleeping
TuringsSolutions
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -17,28 +17,31 @@ tokenizer_names = [
|
|
17 |
# Load all the tokenizers
|
18 |
tokenizers = {name: AutoTokenizer.from_pretrained(name) for name in tokenizer_names}
|
19 |
|
20 |
-
def generate_responses(prompt):
|
21 |
responses = {}
|
22 |
-
for name
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
|
|
30 |
return responses
|
31 |
|
32 |
-
# Gradio interface setup
|
33 |
interface = gr.Interface(
|
34 |
fn=generate_responses,
|
35 |
-
inputs=
|
|
|
|
|
|
|
36 |
outputs=gr.JSON(),
|
37 |
title="Tokenizer Comparison",
|
38 |
description="Compare model outputs with different tokenizers"
|
39 |
)
|
40 |
|
41 |
# Launch the Gradio interface
|
42 |
-
interface.launch()
|
43 |
-
|
44 |
-
|
|
|
17 |
# Load all the tokenizers once at startup so each request reuses cached instances.
# NOTE(review): assumes AutoTokenizer (transformers) and tokenizer_names are defined
# earlier in the file — confirm against the full app.py.
tokenizers = {name: AutoTokenizer.from_pretrained(name) for name in tokenizer_names}
|
20 |
+
def generate_responses(prompt, selected_tokenizers):
    """Generate a model response for *prompt* using each selected tokenizer.

    Args:
        prompt: User-supplied text to tokenize and feed to the model.
        selected_tokenizers: Iterable of tokenizer names chosen in the UI
            (expected to be keys of the module-level ``tokenizers`` dict).

    Returns:
        dict mapping each tokenizer name to its decoded response string,
        or an ``"Error: ..."`` message when the name is unknown or
        generation fails.
    """
    responses = {}
    for name in selected_tokenizers:
        tokenizer = tokenizers.get(name)
        if tokenizer is None:
            # Previously unknown names were silently skipped, leaving the user
            # with no feedback for that checkbox; report them explicitly.
            responses[name] = f"Error: unknown tokenizer '{name}'"
            continue
        try:
            inputs = tokenizer(prompt, return_tensors="pt")
            # NOTE(review): `model` is a module-level global defined elsewhere
            # in app.py — confirm it is loaded before the interface launches.
            outputs = model.generate(**inputs)
            responses[name] = tokenizer.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            # Surface per-tokenizer failures in the JSON output instead of
            # crashing the whole Gradio request.
            responses[name] = f"Error: {e}"
    return responses
|
33 |
|
34 |
+
# Gradio interface setup: a prompt box plus checkboxes to pick tokenizers.
prompt_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
tokenizer_picker = gr.CheckboxGroup(
    choices=tokenizer_names,
    label="Select tokenizers to use",
)

interface = gr.Interface(
    fn=generate_responses,
    inputs=[prompt_input, tokenizer_picker],
    outputs=gr.JSON(),
    title="Tokenizer Comparison",
    description="Compare model outputs with different tokenizers",
)

# Start the Gradio app.
interface.launch()
|
|
|
|