shreeyad committed on
Commit 5a829cf
1 Parent(s): e95868a

code for Beyond the ABCs project

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ data/aya_dataset_features.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,8 @@
+ data/aya_dataset_features_amharic.csv
+ data/aya_dataset_segments.csv
+ data/aya_dataset_features_large.csv
+
+ scripts/
+ vocab/
+ data/.DS_Store
+ .DS_Store
.streamlit/config.toml ADDED
@@ -0,0 +1,6 @@
+ [theme]
+ base="light"
+ primaryColor="#1d5965"
+ textColor="#1d5965"
+
+
app.py ADDED
@@ -0,0 +1,296 @@
+ import streamlit as st
+
+ import re
+ import time
+ import numpy as np
+ import pandas as pd
+
+ from transformers import AutoTokenizer
+ import tiktoken
+
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+
+ import grapheme
+ from unicodedata import category
+ from numpy.linalg import LinAlgError
+
+
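+ # Wraps the registered tokenizers and provides the tokenization, vocabulary
+ # analysis, and plotting helpers used by the Streamlit tabs below.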
+ class TokenizerAnalyzer:
+     def __init__(self):
+         self.tokenizers = {}
+
+     def add_tokenizer(self, name, model_name):
+         self.tokenizers[name] = model_name
+
+     def tokenize_text(self, tokenizer_name, text):
+         start_time = time.time()
+         if tokenizer_name == "gpt-4":
+             tokenizer = tiktoken.encoding_for_model(tokenizer_name)
+             tokens = tokenizer.encode(text)
+         else:
+             tokenizer = AutoTokenizer.from_pretrained(self.tokenizers[tokenizer_name])
+             tokens = tokenizer.tokenize(text)
+         end_time = time.time()
+         tokenization_time = end_time - start_time
+         return tokens, tokenization_time
+
+
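+     # Classifies each vocabulary entry as Latin-only, non-Latin, or an
+     # incomplete byte sequence (a raw "\x.." escape) and returns the counts.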
+     def analyze_vocab(self, vocab_file):
+         latin_count = 0
+         non_latin_count = 0
+         latin_total_length = 0
+         non_latin_total_length = 0
+         incomplete_bytes_count = 0
+
+         # Regular expression to match sequences starting with '\\x'
+         incomplete_bytes_regex = re.compile(r"(?<!\\)(\\x|\\\\x)")
+
+         with open(vocab_file, 'r') as f:
+             for line in f:
+                 token = re.sub(r"^(?P<quote>['\"])(.*?)(?P=quote)$", r"\2", line)
+                 if "gpt-4" not in vocab_file:
+                     token = re.sub("_", "", token)
+                 token = token.strip()
+                 is_latin = True
+                 token_length = len(token)
+
+                 # Check for an incomplete byte sequence at the beginning of the token
+                 if incomplete_bytes_regex.match(token):
+                     incomplete_bytes_count += 1
+                     continue  # Skip further processing for this token
+
+                 for char in token:
+                     char_category = category(char)
+                     if char_category != "Ll" and char_category != "Lu":  # Check for non-Latin characters
+                         is_latin = False
+                         break  # Exit the inner loop once a non-Latin character is found
+
+                 # Process token based on its category
+                 if is_latin:
+                     latin_count += 1
+                     latin_total_length += token_length
+                 else:
+                     non_latin_count += 1
+                     non_latin_total_length += token_length
+
+         # non_latin_count += incomplete_hex_count
+         # Average length does not make sense because there are tokens like: /****************************************************************
+         # non_latin_count also includes cases like .WaitFor
+         return {
+             "latin": latin_count,
+             "non_latin": non_latin_count,
+             "incomplete_bytes": incomplete_bytes_count
+         }
+
+
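+     # Decodes each token back to a string and renders it in Streamlit as a
+     # colour-highlighted <mark> span, alongside the raw token IDs.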
+     def visualize_tokens(self, text, tokenizer):
+
+         if tokenizer == "gpt-4":
+             tokenizer = tiktoken.encoding_for_model(tokenizer)
+             token_ids = tokenizer.encode(text)
+             graphemes = list(grapheme.graphemes(text))
+             # token_ids, str_tokens = [], []
+             # for grapheme_ in graphemes:
+             #     token_id = tokenizer.encode(grapheme_)
+             #     str_tokens.append(tokenizer.decode(token_id))
+             #     token_ids.append(token_id)
+             str_tokens = []
+             for token in token_ids:
+                 str_tokens.append(tokenizer.decode([token], errors="backslashreplace"))
+         else:
+             tokenizer = AutoTokenizer.from_pretrained(tokenizer)
+             tokens = tokenizer.tokenize(text)
+             str_tokens = []
+             for token in tokens:
+                 str_tokens.append(tokenizer.convert_tokens_to_string([token]))
+             token_ids = tokenizer.convert_tokens_to_ids(tokens)
+
+         colors = ['#ffdab9', '#e6ee9c', '#9cddc8', '#bcaaa4', '#c5b0d5']
+
+         html = ""
+         for i, token in enumerate(str_tokens):
+             color = colors[i % len(colors)]
+             html += f'<mark title="{token}" style="background-color: {color};">{token}</mark>'
+
+         st.write("Token IDs:", token_ids)
+         st.write(html, unsafe_allow_html=True)
+
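+     # Draws a grouped bar chart comparing the Latin / non-Latin /
+     # incomplete-byte counts returned by analyze_vocab for each vocabulary.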
+     def plot_vocab_counts(self, vocab_count_dict):
+
+         outer_keys = list(vocab_count_dict.keys())
+         inner_keys = list(vocab_count_dict[outer_keys[0]].keys())
+         values = [[vocab[key] for key in inner_keys] for vocab in vocab_count_dict.values()]
+
+         x = outer_keys
+         num_groups = len(x)
+         pastel_palette = sns.color_palette("pastel", num_groups)
+
+         fig, ax = plt.subplots(figsize=(10, 6))
+         bar_width = 0.8 / num_groups
+         x_pos = [i + (1 - 0.8) / 2 for i in range(num_groups)]
+         for i, y_values in enumerate(values):
+             x_val = [x_pos[j] + bar_width * i for j in range(num_groups)]
+             ax.bar(x_val, y_values, width=bar_width, label=x[i], color=pastel_palette[i])
+
+             for j, value in enumerate(y_values):
+                 ax.annotate(str(value), xy=(x_val[j], value), xytext=(0, 3),
+                             textcoords="offset points", ha='center', va='bottom')
+
+         ax.set_ylabel('Count')
+         ax.set_title('Vocabulary Counts')
+         ax.set_xticks(x_pos)
+         ax.set_xticklabels(inner_keys, rotation=45, ha='right')
+         ax.legend(title='Vocabularies', loc='upper right')
+
+         st.pyplot(fig)
+
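+     # For the selected languages, plots histograms of tokenization time, token
+     # counts, and replacement-token counts, plus a graphemes-vs-tokens scatter.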
+     def draw_plots(self, df, tokenizer, selected_languages):
+
+         pastel_palette = sns.color_palette("pastel")
+
+         df_selected = df[df['language'].isin(selected_languages)]
+
+         plot_titles = [f"Time taken to tokenize across languages by {tokenizer}", f"Token Distribution across languages for {tokenizer}", f"Replacement Tokens distribution across languages for {tokenizer}"]
+
+         df_columns = [f"{tokenizer}_Time", f"{tokenizer}_TokensCount", f"{tokenizer}_ReplTokensCount"]
+
+         for i, column in enumerate(df_columns):
+             plt.figure(figsize=(10, 6))
+             try:
+                 sns.histplot(data=df_selected, x=column, hue="language", palette=pastel_palette, kde=True, element="step", stat="density")
+                 if df_selected[column].nunique() > 1 and not df_selected[column].isnull().all():
+                     # Calculate mean and median
+                     try:
+                         mean_value = df_selected[column].mean()
+                         median_value = df_selected[column].median()
+
+                         # Add vertical lines for mean and median
+                         plt.axvline(mean_value, color='red', linestyle='--', label=f'Mean: {mean_value:.2f}')
+                         plt.axvline(median_value, color='blue', linestyle='--', label=f'Median: {median_value:.2f}')
+
+                         # Add legend with only mean and median
+                         plt.legend()
+                     except LinAlgError:
+                         st.warning("Singular matrix encountered. Skipping mean and median calculation.")
+
+                 plt.title(plot_titles[i])
+                 plt.xlabel(column.split("_")[1])
+                 plt.ylabel("Density")
+                 plt.xticks(rotation=45)
+                 st.pyplot(plt.gcf())
+
+             except Exception as e:
+                 st.error(f"Can't draw plot for {column}. Singular matrix encountered. Statistical measures cannot be calculated.")
+
+         plt.figure(figsize=(10, 6))
+         sns.scatterplot(data=df_selected, x="GraphemesCount", y=f"{tokenizer}_TokensCount", hue="language", palette=pastel_palette)
+         plt.title(f"Graphemes vs. Token Counts across languages for {tokenizer}")
+         plt.xlabel("Graphemes Count")
+         plt.ylabel("Token Count")
+         plt.tight_layout()
+         st.pyplot(plt.gcf())
+
+
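+ # "Playground" tab: tokenizes free-form user text with the chosen tokenizer
+ # and shows the colour-coded result.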
+ def playground_tab(analyzer):
+     st.title("Tokenization Visualizer for Language Models")
+     st.markdown("""
+ You can use this playground to visualize tokens generated by the tokenizers used by popular language models.
+ """)
+
+     tokenizer_name = st.selectbox("Choose a Tokenizer", options=list(analyzer.tokenizers.keys()))
+
+     text_input = st.text_area("Enter text below to visualize tokens:", height=300)
+
+     if st.button("Tokenize"):
+         if text_input.strip():
+             analyzer.visualize_tokens(text_input, analyzer.tokenizers[tokenizer_name])
+         else:
+             st.error("Please enter some text.")
+
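+ # "Analysis" tab: loads precomputed features for the Aya dataset and compares
+ # tokenizer behaviour across the selected languages and vocabularies.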
+ def analysis_tab(analyzer):
+
+     st.title("Tokenizer Performance Analysis for Language Models")
+     st.markdown("""
+ You can use this visualizer to understand how tokenizers work across several languages. The default configuration shows results for English, French, Spanish, Hindi, and Nepali.
+ """)
+
+     dataset_df = pd.read_csv("data/aya_dataset_features.csv")
+
+     available_tokenizers = list(analyzer.tokenizers.keys())
+     default_tokenizer = available_tokenizers[0]  # Change this as per your requirement
+     selected_tokenizer = st.sidebar.selectbox("Select Tokenizer", options=available_tokenizers, index=available_tokenizers.index(default_tokenizer))
+
+     languages = dataset_df["language"].unique()
+     default_languages = ["English", "French", "Spanish", "Hindi", "Nepali (individual language)"]
+     selected_languages = st.sidebar.multiselect("Select Languages", languages, default=default_languages)
+
+     analyzer.draw_plots(dataset_df, selected_tokenizer, selected_languages)
+
+     # Time, Memory --> across languages across tokenizers
+     # replacement tokens count - across languages across tokenizers
+     # token distribution - across languages across tokenizers
+     # graphemes v/s byte counts across languages
+     # graphemes v/s token counts across languages
+
+     # Vocab counts visualization
+     st.subheader("Latin v/s Non-Latin Entries in Vocab")
+     st.markdown("""
+ GPT-4 **cl100k_base.tiktoken** vocab contains:
+ - 70,988 entries containing only Latin characters
+ - 29,268 entries containing at least one non-Latin character
+ - 803 entries with partial byte sequences
+ """)
+     vocab_path = ["vocab/gpt-4-vocab.txt", "vocab/nllb-vocab.txt", "vocab/roberta-vocab.txt"]
+     vocab_count_dicts = {}
+     for vocab in vocab_path:
+         vocab_name = vocab.split("/")[-1].split(".")[0]
+         vocab_count_dict = analyzer.analyze_vocab(vocab)
+         vocab_count_dicts[vocab_name] = vocab_count_dict
+     analyzer.plot_vocab_counts(vocab_count_dicts)
+
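+ # Entry point: registers the Hugging Face and OpenAI tokenizers, then routes
+ # to the tab selected in the sidebar.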
+ def main():
+
+     huggingface_tokenizers = {
+         "XLM-RoBERTa": "FacebookAI/xlm-roberta-base",
+         "nllb-200-distilled-600M": "facebook/nllb-200-distilled-600M",
+     }
+     openai_tokenizers = {
+         'gpt-4': 'gpt-4',
+     }
+
+     st.sidebar.header("Welcome to Tokenization Playground")
+
+     tabs = ['Playground', 'Analysis']
+     selected_tab = st.sidebar.selectbox('Select from options below:', tabs)
+
+     st.sidebar.markdown("""
+ This app was created as a part of the project: "Beyond the ABCs: Exploring the nuances of tokenization in diverse languages."
+ """)
+
+     analyzer = TokenizerAnalyzer()
+
+     for tokenizer, src in huggingface_tokenizers.items():
+         analyzer.add_tokenizer(tokenizer, src)
+
+     for tokenizer, _ in openai_tokenizers.items():
+         analyzer.add_tokenizer(tokenizer, tokenizer)
+
+     if selected_tab == 'Playground':
+         playground_tab(analyzer)
+     elif selected_tab == 'Analysis':
+         analysis_tab(analyzer)
+
+
+ if __name__ == "__main__":
+     main()
+
data/aya_dataset_features.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2474077dbede3b143e0308f9509952933d000e5ce777551bc9c6127a7f4cda53
+ size 16310640