from gradio import Interface
import gradio as gr
import aranizer
from aranizer import (
    aranizer_bpe50k,
    aranizer_bpe64k,
    aranizer_bpe86k,
    aranizer_sp32k,
    aranizer_sp50k,
    aranizer_sp64k,
    aranizer_sp86k,
)
from transformers import AutoTokenizer

# Load the transformers-backed tokenizers once at import time.
# NOTE(review): from_pretrained downloads from the Hugging Face hub on first
# run, so importing this module requires network access (or a warm cache).
gpt_13b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-13B")
gpt_7b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-7B")
jais_13b_tokenizer = AutoTokenizer.from_pretrained("inception-mbzuai/jais-13b")
arabert_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv2")

# Names offered to the user; keys of `tokenizers` below must cover all of these.
tokenizer_options = [
    "aranizer_bpe50k",
    "aranizer_bpe64k",
    "aranizer_bpe86k",
    "aranizer_sp32k",
    "aranizer_sp50k",
    "aranizer_sp64k",
    "aranizer_sp86k",
    "FreedomIntelligence/AceGPT-13B",
    "FreedomIntelligence/AceGPT-7B",
    "inception-mbzuai/jais-13b",
    "aubmindlab/bert-base-arabertv2",
]

# Map each option name to a zero-argument factory that yields the tokenizer.
# AraNizer entries expose get_tokenizer directly; the transformers tokenizers
# are already constructed above, so a lambda just returns the shared instance.
tokenizers = {
    "aranizer_bpe50k": aranizer_bpe50k.get_tokenizer,
    "aranizer_bpe64k": aranizer_bpe64k.get_tokenizer,
    "aranizer_bpe86k": aranizer_bpe86k.get_tokenizer,
    "aranizer_sp32k": aranizer_sp32k.get_tokenizer,
    "aranizer_sp50k": aranizer_sp50k.get_tokenizer,
    "aranizer_sp64k": aranizer_sp64k.get_tokenizer,
    "aranizer_sp86k": aranizer_sp86k.get_tokenizer,
    "FreedomIntelligence/AceGPT-13B": lambda: gpt_13b_tokenizer,
    "FreedomIntelligence/AceGPT-7B": lambda: gpt_7b_tokenizer,
    "inception-mbzuai/jais-13b": lambda: jais_13b_tokenizer,
    "aubmindlab/bert-base-arabertv2": lambda: arabert_tokenizer,
}

# Tokenizers loaded via transformers decode with skip_special_tokens=True;
# the AraNizer tokenizers' decode() takes no such keyword.
_TRANSFORMERS_TOKENIZERS = frozenset(
    [
        "FreedomIntelligence/AceGPT-13B",
        "FreedomIntelligence/AceGPT-7B",
        "inception-mbzuai/jais-13b",
        "aubmindlab/bert-base-arabertv2",
    ]
)


def compare_tokenizers(tokenizer_name, text):
    """Tokenize, encode, and round-trip-decode *text* with the chosen tokenizer.

    Parameters:
        tokenizer_name: one of the keys of ``tokenizers`` (see
            ``tokenizer_options``).
        text: the input string to analyze (expected to be Arabic, but any
            string works).

    Returns:
        An HTML-ish string showing the token list, the encoded id sequence,
        and the decoded text.

    Raises:
        KeyError: if *tokenizer_name* is not a known tokenizer.
    """
    tokenizer = tokenizers[tokenizer_name]()
    tokens = tokenizer.tokenize(text)
    encoded_output = tokenizer.encode(text, add_special_tokens=True)

    # Only the decode() call differs between the two tokenizer families;
    # everything else (tokenize/encode) uses the same API.
    if tokenizer_name in _TRANSFORMERS_TOKENIZERS:
        decoded_text = tokenizer.decode(encoded_output, skip_special_tokens=True)
    else:
        decoded_text = tokenizer.decode(encoded_output)

    # UTF-8 round-trip kept from the original; it is a no-op for str tokens
    # but guards against byte-like tokens slipping through.
    tokens_arabic = [token.encode('utf-8').decode('utf-8') for token in tokens]

    # Prepare the results to be displayed in HTML format.
    # NOTE(review): the source chunk was truncated mid f-string; the closing
    # quotes and return below restore the three visible result lines. Any
    # gr.Interface wiring that followed is outside this chunk.
    results_html = f"""
Tokens: {tokens_arabic}
Encoded: {encoded_output}
Decoded: {decoded_text}
"""
    return results_html