import functools

import gradio as gr
import aranizer
from transformers import AutoTokenizer
# Load the Hugging Face tokenizers once at startup so they are reused across requests
gpt_13b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-13B")
gpt_7b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-7B")
jais_13b_tokenizer = AutoTokenizer.from_pretrained("inception-mbzuai/jais-13b")
# Map each dropdown choice to a tokenizer factory (the aranizer models are
# assumed to expose their tokenizer via get_tokenizer())
tokenizers = {
    "aranizer_bpe50k": lambda: aranizer.aranizer_bpe50k.get_tokenizer(),
    "aranizer_bpe64k": lambda: aranizer.aranizer_bpe64k.get_tokenizer(),
    "aranizer_bpe86k": lambda: aranizer.aranizer_bpe86k.get_tokenizer(),
    "aranizer_sp32k": lambda: aranizer.aranizer_sp32k.get_tokenizer(),
    "aranizer_sp50k": lambda: aranizer.aranizer_sp50k.get_tokenizer(),
    "aranizer_sp64k": lambda: aranizer.aranizer_sp64k.get_tokenizer(),
    "aranizer_sp86k": lambda: aranizer.aranizer_sp86k.get_tokenizer(),
    "FreedomIntelligence/AceGPT-13B": lambda: gpt_13b_tokenizer,
    "FreedomIntelligence/AceGPT-7B": lambda: gpt_7b_tokenizer,
    "inception-mbzuai/jais-13b": lambda: jais_13b_tokenizer,
}
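
# A minimal caching sketch: with live=True the compare function runs on every
# keystroke, so memoize tokenizer construction instead of invoking the
# aranizer factories each time (this assumes get_tokenizer() is deterministic).
@functools.lru_cache(maxsize=None)
def get_tokenizer_cached(tokenizer_name):
    return tokenizers[tokenizer_name]()
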
# Tokenizer options for the dropdown menu
tokenizer_options = list(tokenizers.keys())
def compare_tokenizers(tokenizer_name, text):
    # Guard against the initial live=True call, before a tokenizer has been
    # selected or any text entered
    if not tokenizer_name or not text:
        return []
    tokenizer = get_tokenizer_cached(tokenizer_name)
    tokens = tokenizer.tokenize(text)
    encoded_ids = tokenizer.encode(text, add_special_tokens=True)
    decoded_text = tokenizer.decode(encoded_ids, skip_special_tokens=True)
    # The Dataframe output expects a list of rows; return a single row.
    # (Python 3 strings are already Unicode, so no extra UTF-8 round-trip is
    # needed for the Arabic text.)
    return [(tokenizer_name, tokens, encoded_ids, decoded_text)]
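
# Illustrative output shape (the token ids below are placeholders, not real
# values; actual tokens and ids depend on the chosen vocabulary):
#   compare_tokenizers("aranizer_bpe50k", "مرحبا بالعالم")
#   -> [("aranizer_bpe50k", ["مرحبا", "بالعالم"], [17, 42], "مرحبا بالعالم")]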
inputs_component = [
    # UI labels are in Arabic: "اختر Tokenizer" = "Choose a tokenizer",
    # "النص المدخل" = "Input text", placeholder = "Write your text here..."
    gr.Dropdown(choices=tokenizer_options, label="اختر Tokenizer"),
    gr.Textbox(lines=2, placeholder="اكتب النص الخاص بك هنا...", label="النص المدخل"),
]
outputs_component = gr.Dataframe(
    headers=["Tokenizer", "Tokens", "Encoded Output", "Decoded Text"],
    label="النتائج",  # "Results"
)
iface = gr.Interface(
    fn=compare_tokenizers,
    inputs=inputs_component,
    outputs=outputs_component,
    title="Arabic Tokenizer Arena",
    live=True,  # re-run on every input change
)
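# On Hugging Face Spaces, launch() picks up host and port from the environment;
# running locally, iface.launch(share=True) would additionally create a
# temporary public URL.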
iface.launch()