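# Gradio app that runs the same input text through a dozen popular open-model
# tokenizers (LLaMa, Mistral, GPT-2, Falcon, Phi, T5, Gemma, Command-R, Qwen,
# CodeQwen) and reports each one's token count and token IDs side by side.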
from transformers import AutoTokenizer
import gradio as gr


def formatarr(arr):
    """Render a list of token IDs as a compact string, e.g. "[1,2,3]"."""
    return "[" + ",".join(str(x) for x in arr) + "]"


def tokenize(input_text):
    # Encode the input with each model's tokenizer; add_special_tokens=True
    # includes BOS/EOS markers where the model normally uses them.
    llama_tokens = llama_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    llama3_tokens = llama3_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    mistral_tokens = mistral_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    gpt2_tokens = gpt2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    gpt_neox_tokens = gpt_neox_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    falcon_tokens = falcon_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    phi2_tokens = phi2_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    phi3_tokens = phi3_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    t5_tokens = t5_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    gemma_tokens = gemma_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    command_r_tokens = command_r_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    qwen_tokens = qwen_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    codeqwen_tokens = codeqwen_tokenizer(input_text, add_special_tokens=True)["input_ids"]
    results = {
        "LLaMa-1/LLaMa-2": llama_tokens,
        "LLaMa-3": llama3_tokens,
        "Mistral": mistral_tokens,
        "GPT-2/GPT-J": gpt2_tokens,
        "GPT-NeoX": gpt_neox_tokens,
        "Falcon": falcon_tokens,
        "Phi-1/Phi-2": phi2_tokens,
        "Phi-3": phi3_tokens,
        "T5": t5_tokens,
        "Gemma": gemma_tokens,
        "Command-R": command_r_tokens,
        "Qwen/Qwen1.5": qwen_tokens,
        "CodeQwen": codeqwen_tokens,
    }
    # Build one report line per model: its name, token count, and the IDs.
    toks = ""
    for model, tokens in results.items():
        toks += f"\n{model} gets {len(tokens)} tokens: {formatarr(tokens)}"
    return toks
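
# Sample of the report format for an input like "hello" (token IDs shown are
# illustrative, not verified against the current tokenizer versions):
#   GPT-2/GPT-J gets 1 tokens: [31373]
#   LLaMa-3 gets 2 tokens: [128000,15339]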


if __name__ == "__main__":
    # Load every tokenizer once at startup; tokenize() reads these as globals.
    llama_tokenizer = AutoTokenizer.from_pretrained("TheBloke/Llama-2-7B-fp16")
    llama3_tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-3-8b")
    mistral_tokenizer = AutoTokenizer.from_pretrained("mistral-community/Mistral-7B-v0.2")
    gpt2_tokenizer = AutoTokenizer.from_pretrained("gpt2")
    gpt_neox_tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    falcon_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b")
    phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
    phi3_tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
    t5_tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xxl")
    gemma_tokenizer = AutoTokenizer.from_pretrained("alpindale/gemma-2b")
    command_r_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")
    qwen_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B")
    codeqwen_tokenizer = AutoTokenizer.from_pretrained("Qwen/CodeQwen1.5-7B")
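
    # Wire tokenize() into a minimal text-in/text-out Gradio UI.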
    iface = gr.Interface(
        fn=tokenize, inputs=gr.Textbox(label="Input Text", lines=12), outputs="text"
    )
    iface.launch()
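
# Run locally with `python app.py`; launch() serves on http://127.0.0.1:7860 by
# default, and on a Hugging Face Space the app is served automatically.
# Passing share=True to launch() would also create a temporary public link.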