HeshamHaroon committed
Commit
c296fab
1 Parent(s): bd02afc

Update app.py

Files changed (1): app.py (+17, -5)
app.py CHANGED
@@ -9,6 +9,9 @@ gpt_13b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-13
 gpt_7b_tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/AceGPT-7B")
 jais_13b_tokenizer = AutoTokenizer.from_pretrained("inception-mbzuai/jais-13b")
 arabert_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv2")
+meta_llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
+cohere_command_r_v01_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")
+cohere_command_r_plus_tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")
 
 # List of available tokenizers and a dictionary to load them
 tokenizer_options = [
@@ -17,7 +20,10 @@ tokenizer_options = [
     "FreedomIntelligence/AceGPT-13B",
     "FreedomIntelligence/AceGPT-7B",
     "inception-mbzuai/jais-13b",
-    "aubmindlab/bert-base-arabertv2"
+    "aubmindlab/bert-base-arabertv2",
+    "meta-llama/Meta-Llama-3-8B",
+    "CohereForAI/c4ai-command-r-v01",
+    "CohereForAI/c4ai-command-r-plus"
 ]
 
 tokenizers = {
@@ -31,15 +37,21 @@ tokenizers = {
     "FreedomIntelligence/AceGPT-13B": lambda: gpt_13b_tokenizer,
     "FreedomIntelligence/AceGPT-7B": lambda: gpt_7b_tokenizer,
     "inception-mbzuai/jais-13b": lambda: jais_13b_tokenizer,
-    "aubmindlab/bert-base-arabertv2": lambda: arabert_tokenizer
+    "aubmindlab/bert-base-arabertv2": lambda: arabert_tokenizer,
+    "meta-llama/Meta-Llama-3-8B": lambda: meta_llama_tokenizer,
+    "CohereForAI/c4ai-command-r-v01": lambda: cohere_command_r_v01_tokenizer,
+    "CohereForAI/c4ai-command-r-plus": lambda: cohere_command_r_plus_tokenizer
 }
 
 def compare_tokenizers(tokenizer_name, text):
     # Handle the transformer tokenizers separately due to API differences
-    if tokenizer_name in ["FreedomIntelligence/AceGPT-13B", "FreedomIntelligence/AceGPT-7B", "inception-mbzuai/jais-13b", "aubmindlab/bert-base-arabertv2"]:
+    if tokenizer_name in [
+        "FreedomIntelligence/AceGPT-13B", "FreedomIntelligence/AceGPT-7B",
+        "inception-mbzuai/jais-13b", "aubmindlab/bert-base-arabertv2",
+        "meta-llama/Meta-Llama-3-8B", "CohereForAI/c4ai-command-r-v01", "CohereForAI/c4ai-command-r-plus"
+    ]:
         tokenizer = tokenizers[tokenizer_name]()
         tokens = tokenizer.tokenize(text)
-        tokens_arabic = [token.encode('utf-8').decode('utf-8') for token in tokens]
         encoded_output = tokenizer.encode(text, add_special_tokens=True)
         decoded_text = tokenizer.decode(encoded_output, skip_special_tokens=True)
     else:
@@ -48,9 +60,9 @@ def compare_tokenizers(tokenizer_name, text):
         tokens = tokenizer.tokenize(text)
         encoded_output = tokenizer.encode(text, add_special_tokens=True)
         decoded_text = tokenizer.decode(encoded_output)
-        tokens_arabic = [token.encode('utf-8').decode('utf-8') for token in tokens]
 
     # Prepare the results to be displayed in HTML format
+    tokens_arabic = [token.encode('utf-8').decode('utf-8') if isinstance(token, bytes) else token for token in tokens]
     results_html = f"""
     <div>
         <h3>Tokenizer: {tokenizer_name}</h3>
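
A note on the three newly loaded checkpoints: meta-llama/Meta-Llama-3-8B, CohereForAI/c4ai-command-r-v01, and CohereForAI/c4ai-command-r-plus are gated repositories on the Hugging Face Hub, so the module-level from_pretrained calls will fail unless the running environment has accepted each model's license and supplies an access token. A minimal sketch of an authenticated load, assuming the token is exposed through an HF_TOKEN environment variable (the variable name and this loading pattern are illustrative, not part of the commit):

import os
from transformers import AutoTokenizer

# Assumption: a Hub access token is provided, e.g. as a Space secret.
hf_token = os.environ.get("HF_TOKEN")

# `token=` is the current transformers keyword for authenticated downloads;
# older releases used `use_auth_token=` instead.
meta_llama_tokenizer = AutoTokenizer.from_pretrained(
    "meta-llama/Meta-Llama-3-8B", token=hf_token
)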
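On the relocated tokens_arabic line: hoisting it out of the two branches removes the duplication, and the new isinstance(token, bytes) guard is evidently meant to cope with tokenizers whose tokenize() output is not plain str. As committed, though, the guarded branch calls .encode() on a bytes object, which has no such method in Python 3 and would raise AttributeError; decoding is the operation that turns bytes into text. A corrected sketch (the helper name is illustrative, not from the commit):

def to_display_str(token):
    # bytes -> str: decode, replacing any invalid UTF-8 sequences.
    # str tokens pass through unchanged; the old encode/decode
    # round-trip was a no-op for them anyway.
    if isinstance(token, bytes):
        return token.decode("utf-8", errors="replace")
    return token

tokens_arabic = [to_display_str(token) for token in tokens]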
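For a quick check outside the UI, compare_tokenizers can be exercised directly. Assuming it returns the results_html string built above (the return statement falls outside this diff's context), a short Arabic sample makes the tokenizer differences visible:

# Hypothetical smoke test; model names are from the list added in this commit.
sample = "اللغة العربية جميلة"  # "The Arabic language is beautiful"
for name in ("meta-llama/Meta-Llama-3-8B", "CohereForAI/c4ai-command-r-plus"):
    print(name)
    print(compare_tokenizers(name, sample))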