omarkamali committed on
Commit 2a1003a · verified · 1 Parent(s): 1815cdf

Upload all models and assets for bbc (latest)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +219 -184
  3. models/embeddings/aligned/bbc_128d.bin +3 -0
  4. models/embeddings/aligned/bbc_128d.meta.json +1 -0
  5. models/embeddings/aligned/bbc_128d.projection.npy +3 -0
  6. models/embeddings/aligned/bbc_128d_metadata.json +8 -0
  7. models/embeddings/aligned/bbc_32d.bin +3 -0
  8. models/embeddings/aligned/bbc_32d.meta.json +1 -0
  9. models/embeddings/aligned/bbc_32d.projection.npy +3 -0
  10. models/embeddings/aligned/bbc_32d_metadata.json +8 -0
  11. models/embeddings/aligned/bbc_64d.bin +3 -0
  12. models/embeddings/aligned/bbc_64d.meta.json +1 -0
  13. models/embeddings/aligned/bbc_64d.projection.npy +3 -0
  14. models/embeddings/aligned/bbc_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/bbc_128d.bin +2 -2
  16. models/embeddings/monolingual/bbc_128d_metadata.json +1 -1
  17. models/embeddings/monolingual/bbc_32d.bin +2 -2
  18. models/embeddings/monolingual/bbc_32d_metadata.json +1 -1
  19. models/embeddings/monolingual/bbc_64d.bin +2 -2
  20. models/embeddings/monolingual/bbc_64d_metadata.json +1 -1
  21. models/subword_markov/bbc_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/bbc_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/bbc_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/bbc_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/bbc_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/bbc_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/bbc_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/bbc_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/bbc_2gram_subword.parquet +2 -2
  30. models/subword_ngram/bbc_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/bbc_3gram_subword.parquet +2 -2
  32. models/subword_ngram/bbc_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/bbc_4gram_subword.parquet +2 -2
  34. models/subword_ngram/bbc_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/bbc_5gram_subword.parquet +3 -0
  36. models/subword_ngram/bbc_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/bbc_tokenizer_16k.model +2 -2
  38. models/tokenizer/bbc_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/bbc_tokenizer_32k.model +2 -2
  40. models/tokenizer/bbc_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/bbc_tokenizer_8k.model +2 -2
  42. models/tokenizer/bbc_tokenizer_8k.vocab +0 -0
  43. models/vocabulary/bbc_vocabulary.parquet +2 -2
  44. models/vocabulary/bbc_vocabulary_metadata.json +9 -9
  45. models/word_markov/bbc_markov_ctx1_word.parquet +2 -2
  46. models/word_markov/bbc_markov_ctx1_word_metadata.json +2 -2
  47. models/word_markov/bbc_markov_ctx2_word.parquet +2 -2
  48. models/word_markov/bbc_markov_ctx2_word_metadata.json +2 -2
  49. models/word_markov/bbc_markov_ctx3_word.parquet +2 -2
  50. models/word_markov/bbc_markov_ctx3_word_metadata.json +2 -2
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -text
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  language: bbc
3
- language_name: BBC
4
  language_family: austronesian_batak
5
  tags:
6
  - wikilangs
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
13
  - monolingual
14
  - family-austronesian_batak
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 3.663
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.8223
30
  - name: vocabulary_size
31
  type: vocab
32
  value: 0
33
  generated: 2026-01-03
34
  ---
35
 
36
- # BBC - Wikilangs Models
37
  ## Comprehensive Research Report & Full Ablation Study
38
 
39
- This repository contains NLP models trained and evaluated by Wikilangs, specifically on **BBC** Wikipedia data.
40
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
41
 
42
  ## 📋 Repository Contents
@@ -60,7 +70,7 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
60
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
61
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
62
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
63
- - [6. Morphological Analysis (Experimental)](#6-morphological-analysis)
64
  - [7. Summary & Recommendations](#7-summary--recommendations)
65
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
66
  - [Visualizations Index](#visualizations-index)
@@ -80,43 +90,43 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
80
 
81
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
82
  |------------|-------------|---------------|----------|--------------|
83
- | **8k** | 3.308x | 3.31 | 0.2131% | 1,665,136 |
84
- | **16k** | 3.527x | 3.53 | 0.2273% | 1,561,615 |
85
- | **32k** | 3.663x 🏆 | 3.66 | 0.2360% | 1,503,727 |
86
 
87
  ### Tokenization Examples
88
 
89
  Below are sample sentences tokenized with each vocabulary size:
90
 
91
- **Sample 1:** `Pedurungan i ma sada huta na adong di Kecamatan Taman, Kabupaten Pemalang, Propi...`
92
 
93
  | Vocab | Tokens | Count |
94
  |-------|--------|-------|
95
- | 8k | `▁ped ur ungan ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ... (+12 more)` | 22 |
96
- | 16k | `▁pedurungan ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁dikecamatantaman ... (+10 more)` | 20 |
97
- | 32k | `▁pedurungan ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁dikecamatantaman ... (+10 more)` | 20 |
98
 
99
- **Sample 2:** `Mulyoharjo i ma sada Kelurahan na adong di Kecamatan Pemalang, Kabupaten Pemalan...`
100
 
101
  | Vocab | Tokens | Count |
102
  |-------|--------|-------|
103
- | 8k | `▁mul y oharjo ▁i ▁ma ▁sada ▁kelurahanna ▁adongdi ... (+12 more)` | 22 |
104
- | 16k | `▁mulyoharjo ▁i ▁ma ▁sada ▁kelurahanna ▁adong ▁dikecamatanpemalang ... (+10 more)` | 20 |
105
- | 32k | `▁mulyoharjo ▁i ▁ma ▁sada ▁kelurahanna ▁adong ▁dikecamatanpemalang ... (+10 more)` | 20 |
106
 
107
- **Sample 3:** `Klegen i ma sada huta na adong di Kecamatan Comal, Kabupaten Pemalang, Propinsi ...`
108
 
109
  | Vocab | Tokens | Count |
110
  |-------|--------|-------|
111
- | 8k | `▁kl egen ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ... (+11 more)` | 21 |
112
- | 16k | `▁klegen ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ▁comal ... (+10 more)` | 20 |
113
- | 32k | `▁klegen ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ▁comal ... (+10 more)` | 20 |
114
 
115
 
116
  ### Key Findings
117
 
118
- - **Best Compression:** 32k achieves 3.663x compression
119
- - **Lowest UNK Rate:** 8k with 0.2131% unknown tokens
120
  - **Trade-off:** Larger vocabularies improve compression but increase model size
121
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
122
 
@@ -133,12 +143,14 @@ Below are sample sentences tokenized with each vocabulary size:
133
 
134
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
135
  |--------|---------|------------|---------|----------------|------------------|-------------------|
136
- | **2-gram** | Word | 8,528 | 13.06 | 26,426 | 17.5% | 42.8% |
137
- | **2-gram** | Subword | 185 🏆 | 7.53 | 3,491 | 77.6% | 99.2% |
138
- | **3-gram** | Word | 22,579 | 14.46 | 43,165 | 8.3% | 25.2% |
139
- | **3-gram** | Subword | 1,219 | 10.25 | 18,183 | 38.1% | 83.2% |
140
- | **4-gram** | Word | 44,749 | 15.45 | 67,595 | 5.7% | 16.0% |
141
- | **4-gram** | Subword | 5,604 | 12.45 | 70,417 | 19.7% | 54.7% |
142
 
143
  ### Top 5 N-grams by Size
144
 
@@ -148,8 +160,8 @@ Below are sample sentences tokenized with each vocabulary size:
148
  |------|--------|-------|
149
  | 1 | `angka na` | 4,424 |
150
  | 2 | `dung i` | 4,327 |
151
- | 3 | `ni si` | 4,061 |
152
- | 4 | `i ma` | 3,622 |
153
  | 5 | `ni jahowa` | 2,892 |
154
 
155
  **3-grams (Word):**
@@ -157,57 +169,77 @@ Below are sample sentences tokenized with each vocabulary size:
157
  | Rank | N-gram | Count |
158
  |------|--------|-------|
159
  | 1 | `anak ni si` | 1,613 |
160
- | 2 | `dung i ninna` | 735 |
161
- | 3 | `i ma sada` | 728 |
162
- | 4 | `hata ni jahowa` | 703 |
163
- | 5 | `na adong di` | 690 |
164
 
165
  **4-grams (Word):**
166
 
167
  | Rank | N-gram | Count |
168
  |------|--------|-------|
169
  | 1 | `on do hata ni` | 423 |
170
- | 2 | `songon on do hata` | 408 |
171
- | 3 | `i ma sada huta` | 363 |
172
- | 4 | `angka anak ni si` | 336 |
173
- | 5 | `na adong di kecamatan` | 297 |
174
 
175
  **2-grams (Subword):**
176
 
177
  | Rank | N-gram | Count |
178
  |------|--------|-------|
179
- | 1 | `a _` | 206,904 |
180
- | 2 | `a n` | 205,541 |
181
- | 3 | `n g` | 154,122 |
182
- | 4 | `i _` | 143,001 |
183
- | 5 | `n a` | 122,611 |
184
 
185
  **3-grams (Subword):**
186
 
187
  | Rank | N-gram | Count |
188
  |------|--------|-------|
189
- | 1 | `a n g` | 81,985 |
190
- | 2 | `_ m a` | 76,327 |
191
- | 3 | `n a _` | 58,974 |
192
- | 4 | `_ n a` | 53,547 |
193
- | 5 | `a n _` | 51,343 |
194
 
195
  **4-grams (Subword):**
196
 
197
  | Rank | N-gram | Count |
198
  |------|--------|-------|
199
- | 1 | `_ n i _` | 34,971 |
200
- | 2 | `_ n a _` | 33,600 |
201
- | 3 | `_ d i _` | 25,982 |
202
- | 4 | `a n g k` | 24,957 |
203
- | 5 | `_ m a _` | 23,771 |
204
 
205
 
206
  ### Key Findings
207
 
208
  - **Best Perplexity:** 2-gram (subword) with 185
209
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
210
- - **Coverage:** Top-1000 patterns cover ~55% of corpus
211
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
212
 
213
  ---
@@ -223,14 +255,14 @@ Below are sample sentences tokenized with each vocabulary size:
223
 
224
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
225
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
226
- | **1** | Word | 0.9188 | 1.890 | 6.44 | 50,697 | 8.1% |
227
- | **1** | Subword | 0.9378 | 1.916 | 7.17 | 1,435 | 6.2% |
228
- | **2** | Word | 0.3742 | 1.296 | 2.02 | 325,909 | 62.6% |
229
- | **2** | Subword | 0.7095 | 1.635 | 4.04 | 10,290 | 29.0% |
230
- | **3** | Word | 0.1535 | 1.112 | 1.28 | 658,447 | 84.6% |
231
- | **3** | Subword | 0.6445 | 1.563 | 3.15 | 41,529 | 35.5% |
232
- | **4** | Word | 0.0590 🏆 | 1.042 | 1.09 | 840,046 | 94.1% |
233
- | **4** | Subword | 0.5191 | 1.433 | 2.39 | 130,734 | 48.1% |
234
 
235
  ### Generated Text Samples (Word-based)
236
 
@@ -238,27 +270,27 @@ Below are text samples generated from each word-based Markov chain model:
238
 
239
  **Context Size 1:**
240
 
241
- 1. `ni harbangan asa dioloi si ferdinand lumban gaol boi manampung 1 11 3 naung disunat 2`
242
- 2. `na marumur sataon na humaliang 5 000 m2 hira hira songon sondang ni raja iii diida`
243
- 3. `i gok daupa sian si daud sian pangasammu do sarita pardapot ni na sampuludua i 22`
244
 
245
  **Context Size 2:**
246
 
247
- 1. `angka na niuhir dohot na tarulang angka bagasnasida jala ndang marnalemba 10 38 ingkon mago roham ba...`
248
- 2. `dung i ninna jesus ma siseanna i ninna parompuan na mabalu disi marmudu ho 17 2 dung`
249
- 3. `ni si beor pangarunding i dibunu halak daniel 6 6 1 hamu pe ditanda sada pangituai parheheon`
250
 
251
  **Context Size 3:**
252
 
253
- 1. `anak ni si jaasia si beno 24 27 ia angka anak ni si aron ma mudar i jala`
254
- 2. `dung i ninna ibana tu ahu hombar tu bagabagam 119 171 sai marbulakkon pujipujian ma angka bibirhu al...`
255
- 3. `i ma sada kecamatan na adong di kecamatan petarukan kabupaten pemalang propinsi jawa tonga indonesia...`
256
 
257
  **Context Size 4:**
258
 
259
- 1. `on do hata ni tuhan jahowa ida ma ahu sandiri pahehehon tu nasida hahisaron dohot hamalumon jala pam...`
260
- 2. `songon on do hata ni jahowa zebaot tu hamu malim angka na palea goarhu hape lam didok hamu do`
261
- 3. `i ma sada huta na maringanan di kecamatan tarutung kabupaten tapanuli utara propinsi sumatera utara ...`
262
 
263
 
264
  ### Generated Text Samples (Subword-based)
@@ -267,34 +299,34 @@ Below are text samples generated from each subword-based Markov chain model:
267
 
268
  **Context Size 1:**
269
 
270
- 1. `_nobedi_anoa?_hi`
271
- 2. `akoni_jarin_a_ng`
272
- 3. `nabomaholalowap_`
273
 
274
  **Context Size 2:**
275
 
276
- 1. `a_sia_ahaan_rohot`
277
- 2. `anahu:_tana_raela`
278
- 3. `ngon_nak_i._10:2_`
279
 
280
  **Context Size 3:**
281
 
282
- 1. `anggo_ia_ingka_jor`
283
- 2. `_marik_marhabus;_a`
284
- 3. `na_5_menjangkup_he`
285
 
286
  **Context Size 4:**
287
 
288
- 1. `_ni_angka_halak_juj`
289
- 2. `_na_hian_gabe_manan`
290
- 3. `_di_bagaska_indones`
291
 
292
 
293
  ### Key Findings
294
 
295
  - **Best Predictability:** Context-4 (word) with 94.1% predictability
296
  - **Branching Factor:** Decreases with context size (more deterministic)
297
- - **Memory Trade-off:** Larger contexts require more storage (130,734 contexts)
298
  - **Recommendation:** Context-3 or Context-4 for text generation
299
 
300
  ---
@@ -310,48 +342,48 @@ Below are text samples generated from each subword-based Markov chain model:
310
 
311
  | Metric | Value |
312
  |--------|-------|
313
- | Vocabulary Size | 24,970 |
314
- | Total Tokens | 972,166 |
315
- | Mean Frequency | 38.93 |
316
  | Median Frequency | 4 |
317
- | Frequency Std Dev | 557.36 |
318
 
319
  ### Most Common Words
320
 
321
  | Rank | Word | Frequency |
322
  |------|------|-----------|
323
- | 1 | ni | 35,042 |
324
- | 2 | na | 33,939 |
325
- | 3 | i | 32,856 |
326
- | 4 | ma | 26,602 |
327
- | 5 | di | 26,003 |
328
- | 6 | tu | 20,420 |
329
- | 7 | do | 19,118 |
330
- | 8 | angka | 17,417 |
331
- | 9 | jala | 14,585 |
332
- | 10 | dohot | 13,546 |
333
 
334
  ### Least Common Words (from vocabulary)
335
 
336
  | Rank | Word | Frequency |
337
  |------|------|-----------|
338
- | 1 | continua | 2 |
339
- | 2 | giuseppe | 2 |
340
- | 3 | mamutuskan | 2 |
341
- | 4 | disidang | 2 |
342
- | 5 | disuspensi | 2 |
343
- | 6 | formula | 2 |
344
- | 7 | dibenarkan | 2 |
345
- | 8 | pidana | 2 |
346
- | 9 | piazza | 2 |
347
- | 10 | fontana | 2 |
348
 
349
  ### Zipf's Law Analysis
350
 
351
  | Metric | Value |
352
  |--------|-------|
353
- | Zipf Coefficient | 1.1798 |
354
- | R² (Goodness of Fit) | 0.997075 |
355
  | Adherence Quality | **excellent** |
356
 
357
  ### Coverage Analysis
@@ -359,15 +391,15 @@ Below are text samples generated from each subword-based Markov chain model:
359
  | Top N Words | Coverage |
360
  |-------------|----------|
361
  | Top 100 | 53.7% |
362
- | Top 1,000 | 78.4% |
363
  | Top 5,000 | 91.4% |
364
  | Top 10,000 | 95.7% |
365
 
366
  ### Key Findings
367
 
368
- - **Zipf Compliance:** R²=0.9971 indicates excellent adherence to Zipf's law
369
  - **High Frequency Dominance:** Top 100 words cover 53.7% of corpus
370
- - **Long Tail:** 14,970 words needed for remaining 4.3% coverage
371
 
372
  ---
373
  ## 5. Word Embeddings Evaluation
@@ -383,37 +415,40 @@ Below are text samples generated from each subword-based Markov chain model:
383
 
384
  ### 5.1 Cross-Lingual Alignment
385
 
386
- > *Note: Multilingual alignment visualization not available for this language.*
387
 
388
 
389
  ### 5.2 Model Comparison
390
 
391
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
392
  |-------|-----------|----------|------------------|---------------|----------------|
393
- | **mono_32d** | 32 | 0.8223 🏆 | 0.3239 | N/A | N/A |
394
- | **mono_64d** | 64 | 0.7605 | 0.2710 | N/A | N/A |
395
- | **mono_128d** | 128 | 0.4652 | 0.2352 | N/A | N/A |
396
 
397
  ### Key Findings
398
 
399
- - **Best Isotropy:** mono_32d with 0.8223 (more uniform distribution)
400
- - **Semantic Density:** Average pairwise similarity of 0.2767. Lower values indicate better semantic separation.
401
- - **Alignment Quality:** No aligned models evaluated in this run.
402
  - **Recommendation:** 128d aligned for best cross-lingual performance
403
 
404
  ---
405
  ## 6. Morphological Analysis (Experimental)
406
 
407
- > ⚠️ **Warning:** This language shows low morphological productivity. The statistical signals used for this analysis may be noisy or less reliable than for morphologically rich languages.
408
-
409
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
410
 
411
  ### 6.1 Productivity & Complexity
412
 
413
  | Metric | Value | Interpretation | Recommendation |
414
  |--------|-------|----------------|----------------|
415
- | Productivity Index | **0.000** | Low morphological productivity | ⚠️ Likely unreliable |
416
- | Idiomaticity Gap | **-1.000** | Low formulaic content | - |
417
 
418
  ### 6.2 Affix Inventory (Productive Units)
419
 
@@ -422,26 +457,26 @@ These are the most productive prefixes and suffixes identified by sampling the v
422
  #### Productive Prefixes
423
  | Prefix | Examples |
424
  |--------|----------|
425
- | `-pa` | paunsathon, paraguay, panimbukbuk |
426
- | `-ma` | matan, marsogotna, marimbang |
427
- | `-di` | dihalungunhon, dipajonok, dikunjungi |
428
- | `-mar` | marsogotna, marimbang, marsuhat |
429
- | `-si` | sintuana, simalolongna, siotihotik |
430
- | `-man` | manongoshon, maneat, manongos |
431
- | `-par` | paraguay, paransis, parmaraan |
432
- | `-ha` | haro, hadoboon, hakristenon |
433
 
434
  #### Productive Suffixes
435
  | Suffix | Examples |
436
  |--------|----------|
437
- | `-n` | matan, kanan, paunsathon |
438
- | `-a` | bulanda, tanomanmuna, musikna |
439
- | `-on` | paunsathon, manongoshon, dihalungunhon |
440
- | `-na` | tanomanmuna, musikna, marsogotna |
441
- | `-an` | matan, kanan, tibetan |
442
- | `-ng` | marimbang, palding, lumeleng |
443
- | `-hon` | paunsathon, manongoshon, dihalungunhon |
444
- | `-nna` | anginna, binoanna, parumaenna |
445
 
446
  ### 6.3 Bound Stems (Lexical Roots)
447
 
@@ -449,18 +484,18 @@ Bound stems are high-frequency subword units that are semantically cohesive but
449
 
450
  | Stem | Cohesion | Substitutability | Examples |
451
  |------|----------|------------------|----------|
452
- | `anga` | 1.65x | 126 contexts | angan, sanga, langa |
453
- | `angk` | 1.46x | 153 contexts | angka, rangka, dangka |
454
- | `mang` | 1.72x | 61 contexts | amang, damang, mangae |
455
- | `ngka` | 1.53x | 87 contexts | angka, rangka, dangka |
456
- | `ngko` | 1.76x | 41 contexts | ingkon, angkot, tingko |
457
- | `onga` | 1.75x | 36 contexts | longa, tonga, dongan |
458
- | `angg` | 1.39x | 75 contexts | anggo, anggi, anggia |
459
- | `anna` | 1.73x | 31 contexts | hanna, manna, annai |
460
- | `bahe` | 1.78x | 26 contexts | bahen, dibahe, ibahen |
461
- | `ingk` | 1.43x | 59 contexts | ingkon, tingki, lingka |
462
- | `ngan` | 1.37x | 65 contexts | ingan, angan, dongan |
463
- | `ndan` | 1.68x | 25 contexts | ndang, pandan, undang |
464
 
465
  ### 6.4 Affix Compatibility (Co-occurrence)
466
 
@@ -468,16 +503,16 @@ This table shows which prefixes and suffixes most frequently co-occur on the sam
468
 
469
  | Prefix | Suffix | Frequency | Examples |
470
  |--------|--------|-----------|----------|
471
- | `-pa` | `-n` | 372 words | paboaon, pangalelaon |
472
- | `-ma` | `-n` | 227 words | malungun, mambahen |
473
- | `-pa` | `-on` | 208 words | paboaon, pangalelaon |
474
- | `-pa` | `-a` | 196 words | pangkeannasida, padanna |
475
- | `-pa` | `-an` | 162 words | pamalian, parmiahan |
476
- | `-di` | `-n` | 135 words | diparsahitan, dison |
477
- | `-ma` | `-on` | 127 words | mangkalungunhon, mangkasogohon |
478
- | `-pa` | `-na` | 124 words | padanna, pandokna |
479
- | `-ha` | `-n` | 124 words | harun, hamulian |
480
- | `-di` | `-on` | 113 words | dison, ditahbishon |
481
 
482
  ### 6.5 Recursive Morpheme Segmentation
483
 
@@ -485,26 +520,26 @@ Using **Recursive Hierarchical Substitutability**, we decompose complex words in
485
 
486
  | Word | Suggested Split | Confidence | Stem |
487
  |------|-----------------|------------|------|
488
- | dipahatahata | **`di-pa-ha-ta-hata`** | 9.0 | `hata` |
489
- | marhatomanon | **`mar-ha-toman-on`** | 7.5 | `toman` |
490
- | panimbangan | **`pan-imba-ng-an`** | 7.5 | `imba` |
491
- | patongonhon | **`pa-tong-on-hon`** | 7.5 | `tong` |
492
- | hatigoranku | **`ha-tigor-an-ku`** | 7.5 | `tigor` |
493
- | pargogoanku | **`par-gogo-an-ku`** | 7.5 | `gogo` |
494
- | hagaleonku | **`ha-gale-on-ku`** | 7.5 | `gale` |
495
- | dipatongon | **`di-pa-tong-on`** | 7.5 | `tong` |
496
- | taparrohahon | **`ta-par-roha-hon`** | 7.5 | `roha` |
497
- | parhaporseaon | **`par-ha-porsea-on`** | 7.5 | `porsea` |
498
- | hamuliaonku | **`ha-mulia-on-ku`** | 7.5 | `mulia` |
499
- | paluhutonku | **`pa-luhut-on-ku`** | 7.5 | `luhut` |
500
- | hamateanna | **`ha-ma-tean-na`** | 7.5 | `tean` |
501
- | silehononku | **`si-lehon-on-ku`** | 7.5 | `lehon` |
502
- | patoltolonku | **`pa-toltol-on-ku`** | 7.5 | `toltol` |
503
 
504
  ### 6.6 Linguistic Interpretation
505
 
506
  > **Automated Insight:**
507
- The language BBC appears to be more isolating or has a highly fixed vocabulary. Word-level models perform nearly as well as subword models, indicating fewer productive morphological processes.
508
 
509
  ---
510
  ## 7. Summary & Recommendations
@@ -731,4 +766,4 @@ MIT License - Free for academic and commercial use.
731
  ---
732
  *Generated by Wikilangs Models Pipeline*
733
 
734
- *Report Date: 2026-01-03 06:19:26*
 
1
  ---
2
  language: bbc
3
+ language_name: Batak Toba
4
  language_family: austronesian_batak
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-austronesian_batak
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 3.662
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.8133
40
  - name: vocabulary_size
41
  type: vocab
42
  value: 0
43
  generated: 2026-01-03
44
  ---
45
 
46
+ # Batak Toba - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Batak Toba** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
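
All of the artifacts listed below are stored as Git LFS objects. As a minimal sketch, a single file can be fetched without cloning the whole repository via `huggingface_hub`; the `repo_id` below is a guess from the commit author and paths, not confirmed by this page:

```python
# Hypothetical repo_id; only the file path is taken from this commit.
from huggingface_hub import hf_hub_download

model_path = hf_hub_download(
    repo_id="omarkamali/wikilangs-bbc",  # assumption, adjust to the real repo
    filename="models/tokenizer/bbc_tokenizer_32k.model",
)
print(model_path)
```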
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
  - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
 
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 3.300x | 3.30 | 0.2266% | 1,666,856 |
94
+ | **16k** | 3.529x | 3.53 | 0.2423% | 1,558,753 |
95
+ | **32k** | 3.662x 🏆 | 3.66 | 0.2515% | 1,502,009 |
96
 
97
  ### Tokenization Examples
98
 
99
  Below are sample sentences tokenized with each vocabulary size:
100
 
101
+ **Sample 1:** `Janji i ma sada huta (desa) na adong di Kecamatan Siempat Nempu Hilir, Kabupaten...`
102
 
103
  | Vocab | Tokens | Count |
104
  |-------|--------|-------|
105
+ | 8k | `▁janji ▁i ▁ma ▁sada ▁huta ▁( desa ) ▁na ▁adong ... (+16 more)` | 26 |
106
+ | 16k | `▁janji ▁i ▁ma ▁sada ▁huta ▁( desa )naadong ... (+16 more)` | 26 |
107
+ | 32k | `▁janji ▁i ▁ma ▁sada ▁huta ▁( desa )naadong ... (+16 more)` | 26 |
108
 
109
+ **Sample 2:** `Siboras i ma sada huta (desa) na adong di Kecamatan Silima Pungga Pungga, Kabupa...`
110
 
111
  | Vocab | Tokens | Count |
112
  |-------|--------|-------|
113
+ | 8k | `▁sib oras ▁i ▁ma ▁sada ▁huta( desa ) na ... (+16 more)` | 26 |
114
+ | 16k | `▁siboras ▁i ▁ma ▁sada ▁huta( desa )naadong ... (+15 more)` | 25 |
115
+ | 32k | `▁siboras ▁i ▁ma ▁sada ▁huta( desa )naadong ... (+15 more)` | 25 |
116
 
117
+ **Sample 3:** `Sukorejo i ma sada huta na adong di Kecamatan Ulujami, Kabupaten Pemalang, Propi...`
118
 
119
  | Vocab | Tokens | Count |
120
  |-------|--------|-------|
121
+ | 8k | `▁suk orejo ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ... (+11 more)` | 21 |
122
+ | 16k | `▁sukorejo ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ▁ulujami ... (+10 more)` | 20 |
123
+ | 32k | `▁sukorejo ▁i ▁ma ▁sada ▁huta ▁na ▁adong ▁di ▁kecamatan ▁ulujami ... (+10 more)` | 20 |
124
 
125
 
126
  ### Key Findings
127
 
128
+ - **Best Compression:** 32k achieves 3.662x compression
129
+ - **Lowest UNK Rate:** 8k with 0.2266% unknown tokens
130
  - **Trade-off:** Larger vocabularies improve compression but increase model size
131
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
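
The numbers above can be reproduced along these lines, assuming the `.model` files are standard SentencePiece models (the near-identical compression ratio and average token length in the table suggest compression is measured in characters per token):

```python
# Sketch only: assumes models/tokenizer/*.model are SentencePiece models.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/bbc_tokenizer_32k.model")

text = "Sukorejo i ma sada huta na adong di Kecamatan Ulujami"
ids = sp.encode(text, out_type=int)
pieces = sp.encode(text, out_type=str)

compression = len(text) / len(ids)                        # chars per token
unk_rate = sum(i == sp.unk_id() for i in ids) / len(ids)  # share of <unk>
print(pieces)
print(f"{compression:.3f}x compression, {unk_rate:.4%} UNK")
```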
132
 
 
143
 
144
  | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
145
  |--------|---------|------------|---------|----------------|------------------|-------------------|
146
+ | **2-gram** | Word | 8,503 | 13.05 | 26,404 | 17.5% | 42.9% |
147
+ | **2-gram** | Subword | 185 🏆 | 7.53 | 3,447 | 77.7% | 99.2% |
148
+ | **3-gram** | Word | 22,449 | 14.45 | 43,137 | 8.4% | 25.3% |
149
+ | **3-gram** | Subword | 1,216 | 10.25 | 18,046 | 38.1% | 83.2% |
150
+ | **4-gram** | Word | 44,360 | 15.44 | 67,584 | 5.9% | 16.2% |
151
+ | **4-gram** | Subword | 5,587 | 12.45 | 70,061 | 19.7% | 54.7% |
152
+ | **5-gram** | Word | 29,774 | 14.86 | 42,910 | 7.1% | 18.6% |
153
+ | **5-gram** | Subword | 17,403 | 14.09 | 153,430 | 12.1% | 36.7% |
154
 
155
  ### Top 5 N-grams by Size
156
 
 
160
  |------|--------|-------|
161
  | 1 | `angka na` | 4,424 |
162
  | 2 | `dung i` | 4,327 |
163
+ | 3 | `ni si` | 4,060 |
164
+ | 4 | `i ma` | 3,682 |
165
  | 5 | `ni jahowa` | 2,892 |
166
 
167
  **3-grams (Word):**
 
169
  | Rank | N-gram | Count |
170
  |------|--------|-------|
171
  | 1 | `anak ni si` | 1,613 |
172
+ | 2 | `i ma sada` | 784 |
173
+ | 3 | `na adong di` | 741 |
174
+ | 4 | `dung i ninna` | 735 |
175
+ | 5 | `hata ni jahowa` | 703 |
176
 
177
  **4-grams (Word):**
178
 
179
  | Rank | N-gram | Count |
180
  |------|--------|-------|
181
  | 1 | `on do hata ni` | 423 |
182
+ | 2 | `i ma sada huta` | 417 |
183
+ | 3 | `songon on do hata` | 408 |
184
+ | 4 | `na adong di kecamatan` | 353 |
185
+ | 5 | `angka anak ni si` | 336 |
186
+
187
+ **5-grams (Word):**
188
+
189
+ | Rank | N-gram | Count |
190
+ |------|--------|-------|
191
+ | 1 | `songon on do hata ni` | 406 |
192
+ | 2 | `on do hata ni jahowa` | 250 |
193
+ | 3 | `i ma sada huta na` | 215 |
194
+ | 4 | `desa na adong di kecamatan` | 191 |
195
+ | 5 | `km jala godang ni ruasna` | 175 |
196
 
197
  **2-grams (Subword):**
198
 
199
  | Rank | N-gram | Count |
200
  |------|--------|-------|
201
+ | 1 | `a _` | 206,965 |
202
+ | 2 | `a n` | 205,323 |
203
+ | 3 | `n g` | 154,062 |
204
+ | 4 | `i _` | 142,882 |
205
+ | 5 | `n a` | 122,548 |
206
 
207
  **3-grams (Subword):**
208
 
209
  | Rank | N-gram | Count |
210
  |------|--------|-------|
211
+ | 1 | `a n g` | 81,918 |
212
+ | 2 | `_ m a` | 76,355 |
213
+ | 3 | `n a _` | 58,981 |
214
+ | 4 | `_ n a` | 53,557 |
215
+ | 5 | `a n _` | 51,287 |
216
 
217
  **4-grams (Subword):**
218
 
219
  | Rank | N-gram | Count |
220
  |------|--------|-------|
221
+ | 1 | `_ n i _` | 34,904 |
222
+ | 2 | `_ n a _` | 33,621 |
223
+ | 3 | `_ d i _` | 25,919 |
224
+ | 4 | `a n g k` | 24,948 |
225
+ | 5 | `_ m a _` | 23,827 |
226
+
227
+ **5-grams (Subword):**
228
+
229
+ | Rank | N-gram | Count |
230
+ |------|--------|-------|
231
+ | 1 | `a n g k a` | 19,235 |
232
+ | 2 | `_ a n g k` | 17,946 |
233
+ | 3 | `n g k a _` | 17,765 |
234
+ | 4 | `_ j a l a` | 14,671 |
235
+ | 5 | `j a l a _` | 14,594 |
236
 
237
 
238
  ### Key Findings
239
 
240
  - **Best Perplexity:** 2-gram (subword) with 185
241
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
242
+ - **Coverage:** Top-1000 patterns cover ~37% of corpus
243
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
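
Entropy and perplexity in the table are related by PPL = 2^H (e.g. 2^7.53 ≈ 185 for the 2-gram subword model). A sketch of recomputing them from a count table, assuming the parquet holds one row per n-gram with hypothetical `ngram`/`count` columns:

```python
# Column names `ngram` and `count` are assumptions about the parquet schema.
import math
import pandas as pd

df = pd.read_parquet("models/subword_ngram/bbc_2gram_subword.parquet")
p = df["count"] / df["count"].sum()

entropy = -sum(pi * math.log2(pi) for pi in p)   # bits
perplexity = 2 ** entropy                        # cf. 2^7.53 ≈ 185 above
top1000 = p.nlargest(1000).sum()                 # Top-1000 coverage
print(f"H = {entropy:.2f} bits, PPL = {perplexity:.0f}, top-1000 = {top1000:.1%}")
```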
244
 
245
  ---
 
255
 
256
  | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
257
  |---------|---------|-------------|------------|------------------|-----------------|----------------|
258
+ | **1** | Word | 0.9199 | 1.892 | 6.44 | 50,491 | 8.0% |
259
+ | **1** | Subword | 0.9288 | 1.904 | 7.09 | 1,431 | 7.1% |
260
+ | **2** | Word | 0.3746 | 1.296 | 2.02 | 324,952 | 62.5% |
261
+ | **2** | Subword | 0.7034 | 1.628 | 4.04 | 10,144 | 29.7% |
262
+ | **3** | Word | 0.1537 | 1.112 | 1.28 | 656,964 | 84.6% |
263
+ | **3** | Subword | 0.6472 | 1.566 | 3.17 | 40,950 | 35.3% |
264
+ | **4** | Word | 0.0591 🏆 | 1.042 | 1.09 | 838,369 | 94.1% |
265
+ | **4** | Subword | 0.5206 | 1.435 | 2.40 | 129,601 | 47.9% |
266
 
267
  ### Generated Text Samples (Word-based)
268
 
 
270
 
271
  **Context Size 1:**
272
 
273
+ 1. `ni tano naung leleng on marupaya maningkathon kesadaran masarakat na pauli pintu ni si hannas dohot`
274
+ 2. `na talup do angka naposongku alai anggo raoanna nang jahudi tubu ni halak batak di tongatongamu`
275
+ 3. `i si arni anak ni harangan na mengatur istimewa dok gumodang sian saluhut na nidabuna i`
276
 
277
  **Context Size 2:**
278
 
279
+ 1. `angka na di ginjang ni angka ompunami umbahen manjadi angka i tu ahu do jahowa molo ahu`
280
+ 2. `dung i ro di salelenglelengna psalmen 94 94 1 ale anaha sai parateatehon hamu panariason ni bibirhon`
281
+ 3. `ni si jakkob anak ni si rehabeam di jerusalem 7 17 dua lombu lima birubiru tunggal sada`
282
 
283
  **Context Size 3:**
284
 
285
+ 1. `anak ni si aron hahanasida i marhalado di joro ni jahowa tungkan jolo ni rimberimbe i 40 27`
286
+ 2. `i ma sada nagara na maringanan di lobu panjang`
287
+ 3. `na adong di halak batak toba tombur tarbahen sian sibuk ni manuk na dibumbui`
288
 
289
  **Context Size 4:**
290
 
291
+ 1. `on do hata ni tuhan jahowa nunga pola hupatoltol tanganku maruari ingkon lehononku do i tu ompumuna ...`
292
+ 2. `i ma sada huta na adong di kecamatan silima pungga pungga kabupaten dairi propinsi sumatera utara in...`
293
+ 3. `songon on do hata ni tuhan jahowa hape so tutu jahowa mandok 22 29 ia situan na torop isi`
294
 
295
 
296
  ### Generated Text Samples (Subword-based)
 
299
 
300
  **Context Size 1:**
301
 
302
+ 1. `_man_i_sa_nina_s`
303
+ 2. `amai_palalaseu_n`
304
+ 3. `ndi_ᯔ_no_pa_de_d`
305
 
306
  **Context Size 2:**
307
 
308
+ 1. `a_lamar_na._jalut`
309
+ 2. `ani_ni_ahit_bando`
310
+ 3. `ng_dongkop_hot_ad`
311
 
312
  **Context Size 3:**
313
 
314
+ 1. `angitlawa_rajai,_d`
315
+ 2. `_marhalahite_hite_`
316
+ 3. `na_sapangku_imbolo`
317
 
318
  **Context Size 4:**
319
 
320
+ 1. `_ni_jahowa_hamu_ang`
321
+ 2. `_na_marsaro_mameuth`
322
+ 3. `_di_jeremia_7_novem`
323
 
324
 
325
  ### Key Findings
326
 
327
  - **Best Predictability:** Context-4 (word) with 94.1% predictability
328
  - **Branching Factor:** Decreases with context size (more deterministic)
329
+ - **Memory Trade-off:** Larger contexts require more storage (129,601 contexts)
330
  - **Recommendation:** Context-3 or Context-4 for text generation
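
Generation from these chains is plain weighted sampling over next-token counts. A sketch for the context-2 word model, where the (context, next, count) layout and column names are assumptions about the parquet schema:

```python
# Sketch of sampling from the word-level Markov chain; schema is assumed.
import random
import pandas as pd

df = pd.read_parquet("models/word_markov/bbc_markov_ctx2_word.parquet")

def sample_next(context: str) -> str | None:
    rows = df[df["context"] == context]
    if rows.empty:
        return None
    return random.choices(rows["next"].tolist(), weights=rows["count"].tolist())[0]

words = ["dung", "i"]  # seed with a frequent bigram
for _ in range(20):
    nxt = sample_next(" ".join(words[-2:]))
    if nxt is None:
        break
    words.append(nxt)
print(" ".join(words))
```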
331
 
332
  ---
 
342
 
343
  | Metric | Value |
344
  |--------|-------|
345
+ | Vocabulary Size | 24,923 |
346
+ | Total Tokens | 971,594 |
347
+ | Mean Frequency | 38.98 |
348
  | Median Frequency | 4 |
349
+ | Frequency Std Dev | 557.86 |
350
 
351
  ### Most Common Words
352
 
353
  | Rank | Word | Frequency |
354
  |------|------|-----------|
355
+ | 1 | ni | 34,971 |
356
+ | 2 | na | 33,958 |
357
+ | 3 | i | 32,913 |
358
+ | 4 | ma | 26,658 |
359
+ | 5 | di | 25,940 |
360
+ | 6 | tu | 20,429 |
361
+ | 7 | do | 19,116 |
362
+ | 8 | angka | 17,411 |
363
+ | 9 | jala | 14,584 |
364
+ | 10 | dohot | 13,515 |
365
 
366
  ### Least Common Words (from vocabulary)
367
 
368
  | Rank | Word | Frequency |
369
  |------|------|-----------|
370
+ | 1 | ᯇᯔᯒᯪᯉ᯲ᯖ | 2 |
371
+ | 2 | kayo | 2 |
372
+ | 3 | uttar | 2 |
373
+ | 4 | ltr | 2 |
374
+ | 5 | font | 2 |
375
+ | 6 | ebrima | 2 |
376
+ | 7 | border | 2 |
377
+ | 8 | cellpadding | 2 |
378
+ | 9 | td | 2 |
379
+ | 10 | align | 2 |
380
 
381
  ### Zipf's Law Analysis
382
 
383
  | Metric | Value |
384
  |--------|-------|
385
+ | Zipf Coefficient | 1.1806 |
386
+ | R² (Goodness of Fit) | 0.997033 |
387
  | Adherence Quality | **excellent** |
388
 
389
  ### Coverage Analysis
 
391
  | Top N Words | Coverage |
392
  |-------------|----------|
393
  | Top 100 | 53.7% |
394
+ | Top 1,000 | 78.5% |
395
  | Top 5,000 | 91.4% |
396
  | Top 10,000 | 95.7% |
397
 
398
  ### Key Findings
399
 
400
+ - **Zipf Compliance:** R²=0.9970 indicates excellent adherence to Zipf's law
401
  - **High Frequency Dominance:** Top 100 words cover 53.7% of corpus
402
+ - **Long Tail:** 14,923 words needed for remaining 4.3% coverage
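
The Zipf coefficient and R² above come from a least-squares fit of log f = log C − s·log r over the rank-frequency curve. A sketch, assuming the vocabulary parquet has a `frequency` column (hypothetical name):

```python
# Zipf fit sketch; `frequency` is an assumed column name.
import numpy as np
import pandas as pd

df = pd.read_parquet("models/vocabulary/bbc_vocabulary.parquet")
freq = np.sort(df["frequency"].to_numpy())[::-1]
rank = np.arange(1, len(freq) + 1)

slope, intercept = np.polyfit(np.log(rank), np.log(freq), 1)
resid = np.log(freq) - (slope * np.log(rank) + intercept)
r2 = 1 - (resid ** 2).sum() / ((np.log(freq) - np.log(freq).mean()) ** 2).sum()
print(f"Zipf coefficient s = {-slope:.4f}, R² = {r2:.6f}")  # cf. 1.1806 / 0.9970
```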
403
 
404
  ---
405
  ## 5. Word Embeddings Evaluation
 
415
 
416
  ### 5.1 Cross-Lingual Alignment
417
 
418
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
419
+
420
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
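
The R@1/R@10 columns in the next table are presumably nearest-neighbour retrieval rates over a bilingual seed dictionary. A generic implementation looks like this (random stand-ins replace the real aligned vectors, since the evaluation dictionary is not part of this commit):

```python
# Generic recall@k; row i of X and Y are assumed to be translation pairs.
import numpy as np

def recall_at_k(X: np.ndarray, Y: np.ndarray, k: int) -> float:
    X = X / np.linalg.norm(X, axis=1, keepdims=True)
    Y = Y / np.linalg.norm(Y, axis=1, keepdims=True)
    topk = np.argsort(-(X @ Y.T), axis=1)[:, :k]   # cosine neighbours
    return float((topk == np.arange(len(X))[:, None]).any(axis=1).mean())

X, Y = np.random.randn(500, 128), np.random.randn(500, 128)  # stand-ins
print(recall_at_k(X, Y, 1), recall_at_k(X, Y, 10))           # cf. R@1 / R@10
```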
421
 
422
 
423
  ### 5.2 Model Comparison
424
 
425
  | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
426
  |-------|-----------|----------|------------------|---------------|----------------|
427
+ | **mono_32d** | 32 | 0.8133 | 0.3464 | N/A | N/A |
428
+ | **mono_64d** | 64 | 0.7715 | 0.2725 | N/A | N/A |
429
+ | **mono_128d** | 128 | 0.4709 | 0.2523 | N/A | N/A |
430
+ | **aligned_32d** | 32 | 0.8133 🏆 | 0.3386 | 0.0140 | 0.1240 |
431
+ | **aligned_64d** | 64 | 0.7715 | 0.2780 | 0.0560 | 0.2460 |
432
+ | **aligned_128d** | 128 | 0.4709 | 0.2525 | 0.1340 | 0.3160 |
433
 
434
  ### Key Findings
435
 
436
+ - **Best Isotropy:** aligned_32d with 0.8133 (more uniform distribution)
437
+ - **Semantic Density:** Average pairwise similarity of 0.2900. Lower values indicate better semantic separation.
438
+ - **Alignment Quality:** Aligned models achieve up to 13.4% R@1 in cross-lingual retrieval.
439
  - **Recommendation:** 128d aligned for best cross-lingual performance
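
The report does not state which isotropy estimator the pipeline uses; one common proxy is the ratio of the smallest to the largest principal-component variance of the centered embedding matrix, sketched here under that assumption:

```python
# Illustrative isotropy proxy only — the pipeline's exact metric is unknown.
import numpy as np

E = np.random.randn(14784, 32)                 # stand-in: vocab_size x dim
E -= E.mean(axis=0)                            # center
var = np.linalg.svd(E, compute_uv=False) ** 2  # principal variances
print(f"isotropy ≈ {var.min() / var.max():.4f}")  # 1.0 = perfectly uniform
```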
440
 
441
  ---
442
  ## 6. Morphological Analysis (Experimental)
443

444
  This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
445
 
446
  ### 6.1 Productivity & Complexity
447
 
448
  | Metric | Value | Interpretation | Recommendation |
449
  |--------|-------|----------------|----------------|
450
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
451
+ | Idiomaticity Gap | **-0.493** | Low formulaic content | - |
452
 
453
  ### 6.2 Affix Inventory (Productive Units)
454
 
 
457
  #### Productive Prefixes
458
  | Prefix | Examples |
459
  |--------|----------|
460
+ | `ma-` | mangain, manuhati, mamingkiri |
461
+ | `pa-` | pangir, pahosing, parsonduk |
462
+ | `di-` | disiorhon, didege, diri |
463
+ | `man-` | mangain, manuhati, mangkasiholi |
464
+ | `mar-` | marilah, marhabanhaban, marnioli |
465
+ | `ha-` | hapistaranmuna, harajaon, hanna |
466
+ | `par-` | parsonduk, partalianta, parnidaan |
467
+ | `si-` | sitorus, sitalutuk, sinimpan |
468
 
469
  #### Productive Suffixes
470
  | Suffix | Examples |
471
  |--------|----------|
472
+ | `-n` | disiorhon, mangain, getasan |
473
+ | `-a` | acara, opatsa, hapistaranmuna |
474
+ | `-on` | disiorhon, harajaon, mandaon |
475
+ | `-an` | getasan, nangkohan, bulanan |
476
+ | `-na` | hapistaranmuna, etonganna, utamana |
477
+ | `-hon` | disiorhon, hinungkuphon, ditoishon |
478
+ | `-ng` | humosing, pahosing, taretong |
479
+ | `-nna` | etonganna, hanna, salpuanna |
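
A naive way to rank affix productivity, which approximates but is not the pipeline's method, is to count how often stripping a candidate prefix leaves a form that itself occurs in the vocabulary (the `word` column name is an assumption about the parquet schema):

```python
# Naive productivity count for candidate prefixes — illustration only.
from collections import Counter
import pandas as pd

words = set(pd.read_parquet("models/vocabulary/bbc_vocabulary.parquet")["word"])
candidates = ["ma", "pa", "di", "man", "mar", "ha", "par", "si"]

hits = Counter()
for w in words:
    for p in candidates:
        if w.startswith(p) and w[len(p):] in words:
            hits[p] += 1
print(hits.most_common())
```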
480
 
481
  ### 6.3 Bound Stems (Lexical Roots)
482
 
 
484
 
485
  | Stem | Cohesion | Substitutability | Examples |
486
  |------|----------|------------------|----------|
487
+ | `anga` | 1.61x | 127 contexts | angan, langa, sanga |
488
+ | `angk` | 1.53x | 157 contexts | angka, bangko, angkal |
489
+ | `ngka` | 1.56x | 89 contexts | angka, bungka, engkau |
490
+ | `mang` | 1.64x | 61 contexts | amang, mangan, memang |
491
+ | `ngko` | 1.70x | 42 contexts | bangko, ingkon, angkot |
492
+ | `bang` | 1.45x | 72 contexts | bange, abang, bangis |
493
+ | `ingk` | 1.48x | 60 contexts | lingka, ingkau, ingkon |
494
+ | `onga` | 1.68x | 36 contexts | tonga, longa, bongal |
495
+ | `bahe` | 1.79x | 26 contexts | bahen, dibahe, ibahen |
496
+ | `ngan` | 1.40x | 65 contexts | angan, ingan, mangan |
497
+ | `ongo` | 1.62x | 36 contexts | longo, kongo, rongom |
498
+ | `angg` | 1.31x | 78 contexts | anggi, anggo, angguk |
499
 
500
  ### 6.4 Affix Compatibility (Co-occurrence)
501
 
 
503
 
504
  | Prefix | Suffix | Frequency | Examples |
505
  |--------|--------|-----------|----------|
506
+ | `pa-` | `-n` | 358 words | parsapataan, partingkian |
507
+ | `ma-` | `-n` | 206 words | marpadanpadan, marharajaon |
508
+ | `pa-` | `-on` | 200 words | patoltolhon, paimbarhon |
509
+ | `pa-` | `-a` | 184 words | pallawa, pasalihonsa |
510
+ | `pa-` | `-an` | 157 words | parsapataan, partingkian |
511
+ | `di-` | `-n` | 156 words | disiaphon, dilembagahon |
512
+ | `di-` | `-on` | 134 words | disiaphon, dilembagahon |
513
+ | `ha-` | `-n` | 128 words | hasundatan, hasusaan |
514
+ | `pa-` | `-na` | 119 words | parsuhatonmuna, pabalionna |
515
+ | `ma-` | `-on` | 116 words | marharajaon, mangaluhon |
516
 
517
  ### 6.5 Recursive Morpheme Segmentation
518
 
 
520
 
521
  | Word | Suggested Split | Confidence | Stem |
522
  |------|-----------------|------------|------|
523
+ | pabotohononku | **`pa-boto-hon-on-ku`** | 9.0 | `boto` |
524
+ | paradiananku | **`par-adian-an-ku`** | 7.5 | `adian` |
525
+ | sipasahaton | **`si-pa-sahat-on`** | 7.5 | `sahat` |
526
+ | marparmangsian | **`mar-par-mang-sian`** | 7.5 | `sian` |
527
+ | panailingku | **`pan-aili-ng-ku`** | 7.5 | `aili` |
528
+ | pardonganan | **`par-dong-an-an`** | 7.5 | `dong` |
529
+ | marhamuliaon | **`mar-ha-mulia-on`** | 7.5 | `mulia` |
530
+ | diparsiajari | **`di-par-si-ajari`** | 7.5 | `ajari` |
531
+ | sipaingotna | **`si-pa-ingot-na`** | 7.5 | `ingot` |
532
+ | sipatudoson | **`si-pa-tudos-on`** | 7.5 | `tudos` |
533
+ | dipangasahon | **`di-pan-gasa-hon`** | 7.5 | `gasa` |
534
+ | situtungon | **`si-tutu-ng-on`** | 7.5 | `tutu` |
535
+ | pasahaton | **`pa-sahat-on`** | 6.0 | `sahat` |
536
+ | parbungkason | **`par-bungkas-on`** | 6.0 | `bungkas` |
537
+ | dipajomba | **`di-pa-jomba`** | 6.0 | `jomba` |
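
The exact "Recursive Hierarchical Substitutability" procedure and its confidence scoring are not specified on this page; a toy recursive stripper over the 6.2 affix inventory reproduces splits of this shape:

```python
# Toy recursion over the 6.2 inventory — not the report's actual algorithm.
PREFIXES = ["par", "mar", "pan", "man", "di", "pa", "ha", "si", "ta"]
SUFFIXES = ["hon", "on", "an", "na", "ku", "ng"]

def segment(word: str, min_stem: int = 3) -> list[str]:
    for p in PREFIXES:
        if word.startswith(p) and len(word) - len(p) >= min_stem:
            return [p] + segment(word[len(p):], min_stem)
    for s in SUFFIXES:
        if word.endswith(s) and len(word) - len(s) >= min_stem:
            return segment(word[: -len(s)], min_stem) + [s]
    return [word]

print("-".join(segment("pabotohononku")))  # pa-boto-hon-on-ku, as in the table
```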
538
 
539
  ### 6.6 Linguistic Interpretation
540
 
541
  > **Automated Insight:**
542
+ The language Batak Toba shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
543
 
544
  ---
545
  ## 7. Summary & Recommendations
 
766
  ---
767
  *Generated by Wikilangs Models Pipeline*
768
 
769
+ *Report Date: 2026-01-03 18:37:11*
models/embeddings/aligned/bbc_128d.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db941daa1a238eb429c0856609956fc5e3a6ddec6eeac89c07f912c914729ae9
+ size 1039397332
models/embeddings/aligned/bbc_128d.meta.json ADDED
@@ -0,0 +1 @@
+ {"lang": "bbc", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bbc_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767629a9deca03ea25f9cdc4e8e1e25348498bc9de6ac0a2539f39f23fd9ac75
+ size 65664
models/embeddings/aligned/bbc_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "language": "bbc",
+ "dimension": 128,
+ "version": "aligned",
+ "hub_language": "en",
+ "seed_vocab_size": 2549,
+ "vocab_size": 14784
+ }
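
The `.projection.npy` sizes (65,664 bytes here, i.e. a 128×128 float32 array plus the .npy header) suggest a square linear map from the monolingual space into the shared `hub_language: en` space. A sketch of applying it, assuming the `.bin` is a fastText model — both the format and the matrix orientation are inferences from file names, sizes, and metadata:

```python
# Assumptions: fastText .bin, projection is a dim x dim map into hub space.
import fasttext
import numpy as np

model = fasttext.load_model("models/embeddings/aligned/bbc_128d.bin")
W = np.load("models/embeddings/aligned/bbc_128d.projection.npy")  # (128, 128)

v = model.get_word_vector("huta")   # Batak Toba word vector
aligned = W @ v                     # or v @ W, if stored transposed
print(aligned[:8])
```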
models/embeddings/aligned/bbc_32d.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb8a25efba10b973f5be14a0219a05e94fa03854664bd39525994d2192f1d66b
+ size 260043220
models/embeddings/aligned/bbc_32d.meta.json ADDED
@@ -0,0 +1 @@
+ {"lang": "bbc", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bbc_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:076a48c590ed90d83b5eac50a58f04d6814dd38b06b1cec4dc1639589e7496b3
+ size 4224
models/embeddings/aligned/bbc_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "language": "bbc",
+ "dimension": 32,
+ "version": "aligned",
+ "hub_language": "en",
+ "seed_vocab_size": 2549,
+ "vocab_size": 14784
+ }
models/embeddings/aligned/bbc_64d.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42625ddab9acd5cc385f903d92348185f8d85b955a1567ca788b464774779bf5
+ size 519827924
models/embeddings/aligned/bbc_64d.meta.json ADDED
@@ -0,0 +1 @@
+ {"lang": "bbc", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/bbc_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88ec95d37b1b426e1b7c58a62e1f62de7ebb5819e6260b200fde5305df163f9c
+ size 16512
models/embeddings/aligned/bbc_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "language": "bbc",
+ "dimension": 64,
+ "version": "aligned",
+ "hub_language": "en",
+ "seed_vocab_size": 2549,
+ "vocab_size": 14784
+ }
models/embeddings/monolingual/bbc_128d.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:299d624499ec82c937aa1769fa092d3bd9d976c6d08c2af9b22f767740074a59
- size 1039420296
+ oid sha256:db941daa1a238eb429c0856609956fc5e3a6ddec6eeac89c07f912c914729ae9
+ size 1039397332
models/embeddings/monolingual/bbc_128d_metadata.json CHANGED
@@ -11,5 +11,5 @@
  "encoding_method": "rope",
  "dim": 128
  },
- "vocab_size": 14806
+ "vocab_size": 14784
  }
models/embeddings/monolingual/bbc_32d.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:981df92afd62ee12261b51c6b8c02f4b1e76c4f2d6c299c0dd0e78da12e9ac2b
- size 260049288
+ oid sha256:fb8a25efba10b973f5be14a0219a05e94fa03854664bd39525994d2192f1d66b
+ size 260043220
models/embeddings/monolingual/bbc_32d_metadata.json CHANGED
@@ -11,5 +11,5 @@
  "encoding_method": "rope",
  "dim": 32
  },
- "vocab_size": 14806
+ "vocab_size": 14784
  }
models/embeddings/monolingual/bbc_64d.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e7fddeff473650e7228b6c4a71a4299b4d9ee94908cca09fc7492ca327d34b12
- size 519839624
+ oid sha256:42625ddab9acd5cc385f903d92348185f8d85b955a1567ca788b464774779bf5
+ size 519827924
models/embeddings/monolingual/bbc_64d_metadata.json CHANGED
@@ -11,5 +11,5 @@
  "encoding_method": "rope",
  "dim": 64
  },
- "vocab_size": 14806
+ "vocab_size": 14784
  }
models/subword_markov/bbc_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5aefbfb1171b4b00a844e78d07315efbcf62f55a2b30f5bd6d940b5ba83804d7
- size 81484
+ oid sha256:89bc24e63801d026741c9770f27a242c3db3ea8fb9cb0d8ba39338f308114970
+ size 81519
models/subword_markov/bbc_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 1,
  "variant": "subword",
  "language": "bbc",
- "unique_contexts": 1435,
- "total_transitions": 5702376
+ "unique_contexts": 1431,
+ "total_transitions": 5697823
  }
models/subword_markov/bbc_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f38ba522fe1bac3abb75948ca7e8ea124307971bfee224a267114a3c9466873
- size 358445
+ oid sha256:138603c6263ca67f9e130c880be690e3756d1d9ecf6b93c354a434f14aa57618
+ size 351441
models/subword_markov/bbc_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 2,
  "variant": "subword",
  "language": "bbc",
- "unique_contexts": 10290,
- "total_transitions": 5701163
+ "unique_contexts": 10144,
+ "total_transitions": 5696555
  }
models/subword_markov/bbc_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0a3da300cf244b2ab02f8cc4190a829feb85819df2984a6b1a78f37ab7daec14
- size 1190319
+ oid sha256:94c482b526e4cb960e9393fae965fdf64fa79bbbf1e143460a24fd4aa483396b
+ size 1146284
models/subword_markov/bbc_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 3,
  "variant": "subword",
  "language": "bbc",
- "unique_contexts": 41529,
- "total_transitions": 5699950
+ "unique_contexts": 40950,
+ "total_transitions": 5695287
  }
models/subword_markov/bbc_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:32b0388a33d7717a0deb5945264197ca362aded134c2c728e20da7a5ddee82f4
- size 2842522
+ oid sha256:b039fd6d0aa4782f53e7290985fb2258f331baa62713443dd1054f64b5c3c8e3
+ size 2799128
models/subword_markov/bbc_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 4,
  "variant": "subword",
  "language": "bbc",
- "unique_contexts": 130734,
- "total_transitions": 5698737
+ "unique_contexts": 129601,
+ "total_transitions": 5694019
  }
models/subword_ngram/bbc_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d706d757ef98f7e2eaf523f46f0a602b8f06b25cb1d00579847f22b28eece7f1
- size 46126
+ oid sha256:5b3a78edcc4e35abb2d7b965693bc05197f5c62414bb88673db4bf764173ecb0
+ size 45512
models/subword_ngram/bbc_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "n": 2,
  "variant": "subword",
  "language": "bbc",
- "unique_ngrams": 3491,
- "total_ngrams": 5702376
+ "unique_ngrams": 3447,
+ "total_ngrams": 5697823
  }
models/subword_ngram/bbc_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:578bacf93c61827c89b4f8427603b775cd6b7891f9ceecc9c90545263f0f5768
- size 237045
+ oid sha256:41ccbc1d8480359187b9d615f1be0b4c1262a3ea1bdcfca81011124a20f81767
+ size 235063
models/subword_ngram/bbc_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "n": 3,
  "variant": "subword",
  "language": "bbc",
- "unique_ngrams": 18183,
- "total_ngrams": 5701163
+ "unique_ngrams": 18046,
+ "total_ngrams": 5696555
  }
models/subword_ngram/bbc_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66675bb6688469ac3db798effd708b7b3e5f75e0f119c579a8f76b3e2b98fa07
- size 867430
+ oid sha256:22ea50db1a30e410e829c4a9c7d0cb13fef3e47362ab69b8f634296acece7b2d
+ size 869502
models/subword_ngram/bbc_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "n": 4,
  "variant": "subword",
  "language": "bbc",
- "unique_ngrams": 70417,
- "total_ngrams": 5699950
+ "unique_ngrams": 70061,
+ "total_ngrams": 5695287
  }
models/subword_ngram/bbc_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36b5cf7926926b024d64230a625b6b67cd8ab4c3b3746c91e61bb344deb68792
+ size 1830889
models/subword_ngram/bbc_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "n": 5,
+ "variant": "subword",
+ "language": "bbc",
+ "unique_ngrams": 153430,
+ "total_ngrams": 5694019
+ }
models/tokenizer/bbc_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a176fe5ca0bdc5334c45eb11b39e17d285bd6a1469ac9c9df2ca41a79f9a60c
- size 510189
+ oid sha256:e1650a1c23f388853574d53a3d3fb000a00bb7bc4c39549ead619b3c2aee53ec
+ size 509631
models/tokenizer/bbc_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render.
models/tokenizer/bbc_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92d486b2cdc4197f788488f7923c1a959c468622b7cc6f4cc652213c29688a42
- size 804933
+ oid sha256:4c3cee0f0f305e0d69ab5b0d215b509e0fcdeb5b2a555f63c778099f19694127
+ size 802096
models/tokenizer/bbc_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render.
models/tokenizer/bbc_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd7423a518aa21b134b1db88b66fe95833b16223e79c31fab654c72db01f19bd
- size 370418
+ oid sha256:d498f661bf0788efe8069713f8df23c80442e16e51511ff106c171572555d999
+ size 370350
models/tokenizer/bbc_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render.
models/vocabulary/bbc_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:063a599013021fa7eb8f83c98669f631f73439cf9f45048819ae9c1bfe1070e2
- size 419766
+ oid sha256:75ec6538303ac9c5d834f88cd6177f23420e79bf1f874507e2c0181c6043df01
+ size 424926
models/vocabulary/bbc_vocabulary_metadata.json CHANGED
@@ -1,17 +1,17 @@
  {
  "language": "bbc",
- "vocabulary_size": 24970,
+ "vocabulary_size": 24923,
  "variant": "full",
  "statistics": {
- "type_token_ratio": 0.05084399284522539,
+ "type_token_ratio": 0.0506737344352153,
  "coverage": {
- "top_100": 0.5228987859930757,
- "top_1000": 0.7641910545275995,
- "top_5000": 0.8904758325943073,
- "top_10000": 0.9321639184916853
+ "top_100": 0.5233598374653907,
+ "top_1000": 0.7647650478388052,
+ "top_5000": 0.8909128833346871,
+ "top_10000": 0.9324851609953039
  },
- "hapax_count": 25769,
- "hapax_ratio": 0.507873627781391,
- "total_documents": 1213
+ "hapax_count": 25609,
+ "hapax_ratio": 0.506787778041637,
+ "total_documents": 1268
  }
  }
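
These statistics follow directly from a token frequency table; note that `hapax_count` exceeds `vocabulary_size`, so hapaxes are evidently counted separately from the stored (frequency ≥ 2) vocabulary. A toy computation with made-up tokens — the real pipeline's tokenization and thresholds are not documented on this page:

```python
# Toy illustration of the metadata fields above.
from collections import Counter

tokens = "ni na i ma di ni na i ma ni dohot jala angka huta".split()
counts = Counter(tokens)

type_token_ratio = len(counts) / len(tokens)
hapax = [w for w, c in counts.items() if c == 1]
hapax_ratio = len(hapax) / len(counts)
vocabulary = {w: c for w, c in counts.items() if c >= 2}  # mirrors the freq-2 floor
print(type_token_ratio, hapax_ratio, len(vocabulary))
```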
models/word_markov/bbc_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:71dc86808478f99a7145bcc0a4ff6b725b6972f69dc381f3ae944224fbe4c16e
- size 2205032
+ oid sha256:87071f14275c8ab9dc4e10bd8a8cefd56ab9e6f5efbf03cd62f76cedc788e3b9
+ size 2170275
models/word_markov/bbc_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 1,
  "variant": "word",
  "language": "bbc",
- "unique_contexts": 50697,
- "total_transitions": 996722
+ "unique_contexts": 50491,
+ "total_transitions": 995935
  }
models/word_markov/bbc_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:728d851895842f58fe81fb7ab5ed1ae78d0307c5849828fe282e48bfdcd66b91
- size 6594255
+ oid sha256:394d7c0642f04e130f4d28719227a225dcac50c0850c3f41e78341d5113d0770
+ size 6561285
models/word_markov/bbc_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 2,
  "variant": "word",
  "language": "bbc",
- "unique_contexts": 325909,
- "total_transitions": 995509
+ "unique_contexts": 324952,
+ "total_transitions": 994667
  }
models/word_markov/bbc_markov_ctx3_word.parquet CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8f6ad77c32e92404bdcd431db829da9cd2b7fba3d776d4add45047f520d777bd
- size 10832852
+ oid sha256:b4bcf87aef1351b5c03923a2f80f78168cda522841f4beab737b95a27b137376
+ size 10814318
models/word_markov/bbc_markov_ctx3_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
  "context_size": 3,
  "variant": "word",
  "language": "bbc",
- "unique_contexts": 658447,
- "total_transitions": 994305
+ "unique_contexts": 656964,
+ "total_transitions": 993408
  }