xzuyn committed on
Commit e363f01
1 Parent(s): d357f15

Update app.py

Files changed (1)
app.py +10 -16
app.py CHANGED
@@ -2,18 +2,6 @@ from transformers import AutoTokenizer
 import gradio as gr
 
 
-def load_tokenizers():
-    llama1_tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
-    llama2_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
-    mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
-    gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
-    gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
-    falcon_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
-    phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
-    t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")
-
-
-
 def tokenize(input_text):
     llama1_tokens = llama1_tokenizer(input_text, add_special_tokens=True)["input_ids"]
     llama2_tokens = llama2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
@@ -23,13 +11,19 @@ def tokenize(input_text):
     falcon_tokens = falcon_tokenizer(input_text, add_special_tokens=True)["input_ids"]
     phi2_tokens = phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
     t5_tokens = t5_tokenizer(input_text, add_special_tokens=True)["input_ids"]
-
 
     return f"LLaMa-1: {len(llama1_tokens)}\nLLaMa-2: {len(llama2_tokens)}\nMistral: {len(mistral_tokens)}\nGPT-2/GPT-J: {len(gpt2_tokens)}\nGPT-NeoX: {len(gpt_neox_tokens)}\nFalcon: {len(falcon_tokens)}\nPhi-2: {len(phi2_tokens)}\nT5: {len(t5_tokens)}"
 
 
 if __name__ == "__main__":
-    load_tokenizers()
-
+    llama1_tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
+    llama2_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
+    mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
+    gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
+    falcon_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
+    phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
+    t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")
+
     iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=7), outputs="text")
-    iface.launch()
+    iface.launch()
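
The move fixes a real bug, not just style: in the parent revision, load_tokenizers() only bound the eight tokenizers as function locals, so they were discarded when the call returned and the first call to tokenize() would fail with NameError: name 'llama1_tokenizer' is not defined. Assigning them at module scope inside the __main__ guard makes them visible to tokenize() as globals.

A minimal sketch of an equivalent, more compact layout (a hypothetical follow-up, not part of this commit; the MODEL_IDS and TOKENIZERS names and the dict-driven loop are assumptions): keeping the Hub repo IDs in one dict means adding a tokenizer is a one-line change, and the report string no longer has to be edited by hand.

from transformers import AutoTokenizer
import gradio as gr

# Display name -> Hub repo ID, taken from the commit above.
MODEL_IDS = {
    "LLaMa-1": "huggyllama/llama-7b",
    "LLaMa-2": "TheBloke/Llama-2-7B-fp16",
    "Mistral": "mistralai/Mistral-7B-v0.1",
    "GPT-2/GPT-J": "gpt2",
    "GPT-NeoX": "EleutherAI/gpt-neox-20b",
    "Falcon": "tiiuae/falcon-7b",
    "Phi-2": "microsoft/phi-2",
    "T5": "google/flan-t5-xxl",
}

# Load eagerly at import time (hypothetical choice; the commit loads under
# the __main__ guard instead). from_pretrained downloads each tokenizer
# from the Hugging Face Hub on first use and reads the local cache after.
TOKENIZERS = {
    name: AutoTokenizer.from_pretrained(repo) for name, repo in MODEL_IDS.items()
}


def tokenize(input_text):
    # One "name: count" line per tokenizer, counting special tokens too.
    return "\n".join(
        f"{name}: {len(tok(input_text, add_special_tokens=True)['input_ids'])}"
        for name, tok in TOKENIZERS.items()
    )


if __name__ == "__main__":
    iface = gr.Interface(fn=tokenize, inputs=gr.Textbox(lines=7), outputs="text")
    iface.launch()

Since dicts preserve insertion order, the report lines come out in the same order as the original f-string.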