HeshamHaroon commited on
Commit
cd02b60
1 Parent(s): 9998155

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import aranizer
4
  from aranizer import aranizer_bpe50k, aranizer_bpe64k, aranizer_bpe86k, aranizer_sp32k, aranizer_sp50k, aranizer_sp64k, aranizer_sp86k
5
 
6
- # Correct way to load all available tokenizers as per the provided usage information
7
  tokenizers = {
8
  "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer(),
9
  "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer(),
@@ -20,13 +20,12 @@ def compare_tokenizers(text):
20
  tokens = tokenizer.tokenize(text)
21
  encoded_output = tokenizer.encode(text, add_special_tokens=True)
22
  decoded_text = tokenizer.decode(encoded_output)
23
- # Collect each tokenizer's results
24
  results.append((name, tokens, encoded_output, decoded_text))
25
  return results
26
 
27
- # Correctly use Gradio's components for inputs and outputs
28
- inputs_component = gr.components.Textbox(lines=2, placeholder="Enter Arabic text here...", label="Input Text")
29
- outputs_component = gr.components.Table(label="Results", headers=["Tokenizer", "Tokens", "Encoded Output", "Decoded Text"])
30
 
31
  # Setting up the interface
32
  iface = Interface(fn=compare_tokenizers, inputs=inputs_component, outputs=outputs_component, title="AraNizer Tokenizer Comparison")
 
3
  import aranizer
4
  from aranizer import aranizer_bpe50k, aranizer_bpe64k, aranizer_bpe86k, aranizer_sp32k, aranizer_sp50k, aranizer_sp64k, aranizer_sp86k
5
 
6
+ # Load all available tokenizers
7
  tokenizers = {
8
  "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer(),
9
  "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer(),
 
20
  tokens = tokenizer.tokenize(text)
21
  encoded_output = tokenizer.encode(text, add_special_tokens=True)
22
  decoded_text = tokenizer.decode(encoded_output)
 
23
  results.append((name, tokens, encoded_output, decoded_text))
24
  return results
25
 
26
# Define the Gradio interface components
inputs_component = gr.Textbox(lines=2, placeholder="Enter Arabic text here...", label="Input Text")
outputs_component = gr.Dataframe(headers=["Tokenizer", "Tokens", "Encoded Output", "Decoded Text"], label="Results")

# Setting up the interface.
# Fix: qualify as gr.Interface — the visible import is `import gradio as gr`
# (no `from gradio import Interface` appears in view), so a bare `Interface`
# would raise NameError when the module loads. TODO confirm no other import
# of `Interface` exists above line 3 of the original file.
iface = gr.Interface(
    fn=compare_tokenizers,
    inputs=inputs_component,
    outputs=outputs_component,
    title="AraNizer Tokenizer Comparison",
)