xzuyn committed
Commit
e3dfd55
1 Parent(s): 2b258aa

Update app.py

Files changed (1)
  1. app.py +10 -10
app.py CHANGED

@@ -26,15 +26,15 @@ def tokenize(input_text):
 
     results = {
         "LLaMa-1/LLaMa-2": llama_tokens,
-        " LLaMa-3": llama3_tokens,
-        " Mistral": mistral_tokens,
-        " GPT-2/GPT-J": gpt2_tokens,
-        " GPT-NeoX": gpt_neox_tokens,
-        " Falcon": falcon_tokens,
-        " Phi": phi2_tokens,
-        " T5": t5_tokens,
-        " Gemma": gemma_tokens,
-        " Command-R": command_r_tokens
+        "LLaMa-3": llama3_tokens,
+        "Mistral": mistral_tokens,
+        "GPT-2/GPT-J": gpt2_tokens,
+        "GPT-NeoX": gpt_neox_tokens,
+        "Falcon": falcon_tokens,
+        "Phi": phi2_tokens,
+        "T5": t5_tokens,
+        "Gemma": gemma_tokens,
+        "Command-R": command_r_tokens
     }
 
     # Sort the results in descending order based on token length
@@ -55,5 +55,5 @@ if __name__ == "__main__":
     gemma_tokenizer = AutoTokenizer.from_pretrained("alpindale/gemma-2b")
     command_r_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")
 
-    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=10), outputs="text")
+    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(label="Token Counts", lines=10), outputs="text")
     iface.launch()
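
For context, a minimal sketch of what app.py plausibly looks like around this change. Only the result labels, the gemma-2b and command-r-plus AutoTokenizer.from_pretrained calls, the sort comment, and the gr.Interface line appear in the diff itself; the counting logic (len(tokenizer.encode(...))) and the "gpt2" checkpoint are assumptions for illustration, and the other tokenizers are omitted for brevity.

    # Minimal sketch, not the full app.py: the counting approach and the "gpt2"
    # checkpoint are assumptions; the labels, the gemma/command-r checkpoints,
    # and the gr.Interface call are taken from the diff above.
    import gradio as gr
    from transformers import AutoTokenizer


    def tokenize(input_text):
        # Count the tokens each tokenizer produces for the input (assumed implementation).
        results = {
            "GPT-2/GPT-J": len(gpt2_tokenizer.encode(input_text)),
            "Gemma": len(gemma_tokenizer.encode(input_text)),
            "Command-R": len(command_r_tokenizer.encode(input_text)),
        }
        # Sort the results in descending order based on token length
        sorted_results = sorted(results.items(), key=lambda kv: kv[1], reverse=True)
        return "\n".join(f"{name}: {count}" for name, count in sorted_results)


    if __name__ == "__main__":
        gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")  # assumed checkpoint
        gemma_tokenizer = AutoTokenizer.from_pretrained("alpindale/gemma-2b")
        command_r_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")

        iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(label="Token Counts", lines=10), outputs="text")
        iface.launch()

In this sketch the change in the commit is purely cosmetic: stripping the leading space from the dictionary keys cleans up the labels in the output text, and adding label="Token Counts" to the gr.Textbox replaces Gradio's default input label.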