{
    "paper_id": "I11-1036",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:31:28.380018Z"
    },
    "title": "CODACT: Towards Identifying Orthographic Variants in Dialectal Arabic",
    "authors": [
        {
            "first": "Pradeep",
            "middle": [],
            "last": "Dasigi",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Columbia University",
                "location": {}
            },
            "email": ""
        },
        {
            "first": "Mona",
            "middle": [],
            "last": "Diab",
            "suffix": "",
            "affiliation": {},
            "email": "mdiab@ccls.columbia.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Dialectal Arabic (DA) is the spoken vernacular for over 300M people worldwide. DA is emerging as the form of Arabic written in online communication: chats, emails, blogs, etc. However, most existing NLP tools for Arabic are designed for processing Modern Standard Arabic, a variety that is more formal and scripted. Apart from the genre variation that is a hindrance for any language processing, even in English, DA has no orthographic standard, compared to MSA that has a standard orthography and script. Accordingly, a word may be written in many possible inconsistent spellings rendering the processing of DA very challenging. To solve this problem, such inconsistencies have to be normalized. This work is the first step towards addressing this problem, as we attempt to identify spelling variants in a given textual document. We present an unsupervised clustering approach that addresses the problem of identifying orthographic variants in DA. We employ different similarity measures that exploit string similarity and contextual semantic similarity. To our knowledge this is the first attempt at solving the problem for DA. Our approaches are tested on data in two dialects of Arabic-Egyptian and Levantine. Our system achieves the highest Entropy of 0.19 for Egyptian (corresponding to 68% cluster precision) and Levantine (corresponding to 64% cluster precision) respectively. This constitutes a significant reduction in entropy (from 0.47 for Egyptian and 0.51 for Levantine) and improvement in cluster precision (from 29% for both) from the baseline.",
    "pdf_parse": {
        "paper_id": "I11-1036",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Dialectal Arabic (DA) is the spoken vernacular for over 300M people worldwide. DA is emerging as the form of Arabic written in online communication: chats, emails, blogs, etc. However, most existing NLP tools for Arabic are designed for processing Modern Standard Arabic, a variety that is more formal and scripted. Apart from the genre variation that is a hindrance for any language processing, even in English, DA has no orthographic standard, compared to MSA that has a standard orthography and script. Accordingly, a word may be written in many possible inconsistent spellings rendering the processing of DA very challenging. To solve this problem, such inconsistencies have to be normalized. This work is the first step towards addressing this problem, as we attempt to identify spelling variants in a given textual document. We present an unsupervised clustering approach that addresses the problem of identifying orthographic variants in DA. We employ different similarity measures that exploit string similarity and contextual semantic similarity. To our knowledge this is the first attempt at solving the problem for DA. Our approaches are tested on data in two dialects of Arabic-Egyptian and Levantine. Our system achieves the highest Entropy of 0.19 for Egyptian (corresponding to 68% cluster precision) and Levantine (corresponding to 64% cluster precision) respectively. This constitutes a significant reduction in entropy (from 0.47 for Egyptian and 0.51 for Levantine) and improvement in cluster precision (from 29% for both) from the baseline.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Arabic is the native tongue of over 300M people world wide. The Arabic language exhibits a relatively unique linguistic phenomena known as diglossia (Ferguson, 1959) where two forms of the language live side by side: a standard formal form known as Modern Standard Arabic (MSA) and an informal spoken form, the vernaculars used in everyday communication referred to as Dialectal Arabic (DA). MSA is the only language of education and is used in formal settings and Broadcast news. The only written standard is in MSA using the Arabic script. Technically there are no native speakers of MSA. On the other hand, DA is the mother tongue for all native speakers of Arabic however it is not traditionally a written form of the language and it differs significantly enough from MSA on all levels of linguistic representation that results in huge inconsistencies in orthography. This was not a problem a decade ago from an NLP perspective since all the resources were in MSA. Now with the proliferation of online media and informal genres, DA is ubiquitous online. Users of DA online write in different scripts (Arabic, Romanizations interspersed with digits), they also sometimes write phonemically. Similar to other languages (not unique to DA) in these informal genres, we observe rampant speech effects such as elongations and the use of emoticons within the text which compounds the problem further for processing DA. If NLP tools want to process real Arabic as spoken by its people, they need to address DA seriously. This paper presents an initial attempt at addressing the pervasive inconsistencies in DA orthography in informal media.",
                "cite_spans": [
                    {
                        "start": 149,
                        "end": 165,
                        "text": "(Ferguson, 1959)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We cast the problem of lack of DA orthographic standards as an identification of spelling variants problem using unsupervised clustering techniques. We evaluate our results against a gold corrected set of data in two dialects: Egyptian (EGY) and Levantine (LEV) . We focus our current efforts on identifying the orthographic variants in Arabic script though our work is extendible to the Romanizations as well. Such an identification is a necessary step for normalizing the variation which is useful for addressing the sparseness problem for DA. We contend that there are patterns in the variations that could be captured and processed. Also it is worth pointing out that this problem encompasses the spelling mistakes problem but it goes beyond it to address legitimate orthographic variants. Hence we attempt an approach that is generic enough to cover both scopes.",
                "cite_spans": [
                    {
                        "start": 246,
                        "end": 261,
                        "text": "Levantine (LEV)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "This paper is organized as follows: in Section 2 we show some of the variations between MSA and DA on different levels of linguistic representation; Section 3 discusses some related work; in section 4 we outline our approach and experimental conditions; in Section 5 we describe the data against which we evaluate our approach; we discuss the results and evaluation in Section 6; in Section 7 we discuss errors and performance of the system and approach proposed; finally, in Section 8 we conclude with some final remarks and a look at some future directions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Most of the research effort, to date in creating tools and resources for Arabic has focused on MSA. In recent years we have seen a concentrated effort on making Arabic processing tools on par with English processing tools (Habash and Rambow., 2005; Diab et al., 2007; Kulick, 2010; Green and Manning., 2010) . Researchers interested in handling realistic Arabic text have come to the realization that DA needs to be addressed. Applying state of the art MSA processing tools directly to DA yields very low performance proving the significant difference between the two varieties. For instance applying MSA tokenizers to DA yields a performance of 88% which is completely unacceptable as an initial processing tool performance. It is worth noting that state of the art MSA tokenization is at 99.2% (Diab et al., 2007) . This low performance on DA can be explained by the genre differences (MSA tools are trained on newswire genres) but compared to English, we do not observe such a huge discrepancy between tokenizers trained on newswire when applied to informal genres. The significant drop in performance can be safely relegated to the inherent differences between the two varieties of Arabic. MSA differs from DA on the phonological, morphological, lexical, syntactic, semantic and pragmatic levels. The degree of variation depends on the specific dialect of Arabic. For instance, phonologically MSA would pronounce the word for dress as fustAn and spell it while the same word in LEV is pronounced as fusTAn 1 with an emphatic T and could possibly be written phonemically in LEV as . Morphologically, DA exhibits simpler inflectional morphology than MSA overall however cliticization is more nuanced rendering tokenization a more complex problem in DA than in MSA. For example, DA has lost all explicit marking of grammatical case and dual marking on verbal predicates. The MSA phrase AlmwZ-fAn AklA, , meaning 'the employees dual nominative ate dual', becomes AlmwZfyn AlAtnyn AkAlw, , 'the two employees plural no case ate plural'. Hence we note the loss of dual inflection marking and nominative case marking. On the other hand, clitization is more complex in DA as follows: EGY mAHkytl-hAlhw$, , 'she did not recount it to him' is expressed in three words in MSA as lm tHkyhA lh",
                "cite_spans": [
                    {
                        "start": 222,
                        "end": 248,
                        "text": "(Habash and Rambow., 2005;",
                        "ref_id": null
                    },
                    {
                        "start": 249,
                        "end": 267,
                        "text": "Diab et al., 2007;",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 268,
                        "end": 281,
                        "text": "Kulick, 2010;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 282,
                        "end": 307,
                        "text": "Green and Manning., 2010)",
                        "ref_id": null
                    },
                    {
                        "start": 796,
                        "end": 815,
                        "text": "(Diab et al., 2007)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DA vs. MSA Phenomena",
                "sec_num": "2"
            },
            {
                "text": ". The lexical, syntactic, semantic and pragmatic variations abound between MSA and DA. The phonological and morphological differences lend themselves directly to the orthographic variation problem exhibited with DA. Writers of DA use a myriad of scripts to encode DA. All of which are used inconsistently even within the writings of the same author in the same post/article/blog. The most frequent scripts used are Arabic and Romanization. We note that people have use also Hebrew and Cyrillic scripts to write Arabic as well. For Arabic script we see inconsistencies in characters that exhibit regional variations such as the qAf sound . This letter is pronounced as a glottal stop\u00b4in EGY and LEV but as a g sound in the Gulf states and q sound in Tunisia, in most cases. Speakers and writers pertaining to these different dialects could render it in the orthography as it appears in a word such as he said qAl as\u00c1l [EGY] , gAl [Gulf] or qAl [Tunisian] . Moreover, we observe more severe variants in the Romanized script for the same word where the writers can render the EGY as 2Al, AAl, or qAl. For the purposes of this paper we will focus our discussion on identifying the orthographic variants only in the Arabic script leaving the handling of orthographic variants in Romanization for future work. 2",
                "cite_spans": [
                    {
                        "start": 917,
                        "end": 922,
                        "text": "[EGY]",
                        "ref_id": null
                    },
                    {
                        "start": 929,
                        "end": 935,
                        "text": "[Gulf]",
                        "ref_id": null
                    },
                    {
                        "start": 943,
                        "end": 953,
                        "text": "[Tunisian]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "DA vs. MSA Phenomena",
                "sec_num": "2"
            },
            {
                "text": "Most of the recent work in the area of orthographic variant detection and spelling correction has been towards resolving inconsistencies in spellings of Named Entities(NE). Huang et al. 2008use NE spelling variant detection to improve the performance of Machine Translation (MT) and Information Extraction (IE) systems. (Habash and Metsky, 2008) cluster Urdu phrases mapping to the same English phrase to automatically learn morphological variation rules. Since Urdu is a morphologically rich language, such variations result in many OOV words. They use the these rules learned, as a part of MT system to replace OOV Urdu words with in-vocabulary words online. Raghavan and Allan (2005) use the edit distance metric along with generative models trained from Automatic Speech Recognition (ASR) output to cluster queries to improve performance of Information Retrieval (IR) systems. Bhagat and Hovy 2007attempted to generate all possible spelling variations of a person's name. One method is supervised and uses CMU speech dictionary to train a phonetic model. Another is to cluster a large set of names that are known to sound similar using Soundex (Knuth, 1973) . Although some earlier work related to spelling variations (Golding and Roth, 1999) dealt with the generalized problem, most of the recent work is confined to NEs.This is because of the relevance of the problem to NLP applications such as MT, IE, IR and ASR. Accordingly we note that the problem we try to solve is more generic since the lack of orthographic standard in DA affects the spellings of all kinds of words.",
                "cite_spans": [
                    {
                        "start": 320,
                        "end": 345,
                        "text": "(Habash and Metsky, 2008)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 1148,
                        "end": 1161,
                        "text": "(Knuth, 1973)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "3"
            },
            {
                "text": "Our goal is to identify orthographic variations in textual DA. We build a system, CODACT, that aims at identifying and eventually normalizing such DA orthographic variants. We use techniques noted in the spelling correction literature. Our approach is mainly unsupervised. We view the problem as a clustering problem where our goal is to identify if two strings are similar, and hence cluster together. To that end we explore three basic similarity measures: (a) String based Similarity as direct Levenshtein Edit Distance; (b) String based Similarity Biased Edit Distance; and ( c) Contextual String Similarity. We model the strings of interest in a vector space. We build a matrix for the string types of interest. We induce the clusters from the matrix by grouping the strings in the row entries together based on the similarity of their respective vectors in the matrix. We use Cosine Similarity between vectors and we use the implementation of the CLUTO Repeated Bisection (RB) algorithm with cosine similarity being the measure of similarity between vectors. (Zhao and Karypis, 2001) . CLUTO is very suitable for clustering high dimensional datasets. CLUTO's repeated bisection partition method is used for clustering. In this method, for obtaining a k -way clustering, k-1 repeated bisections are made. Each partition is made to the input dataset such that the clustering criterion function is optimized.",
                "cite_spans": [
                    {
                        "start": 1065,
                        "end": 1089,
                        "text": "(Zhao and Karypis, 2001)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Approach",
                "sec_num": "4"
            },
            {
                "text": "The row entries for the matrix are referred to as the focal string types of interest. We vary the dimensions as follows: (a) for N focal words, we have the same N focal words in the matrix dimensions, yielding an NxN matrix; or (b) the dimensions are all the string types in the corpus of interest yielding an NxM matrix. The cells of the matrix are populated based on one of the different similarity measures or a combination of them after normalization. We describe the different similarity measures next.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Approach",
                "sec_num": "4"
            },
            {
                "text": "Strings that vary from each other minimally are likely to be orthographic variants of one another. Following this intuition, strings are grouped based on their string edit distance. We explore the basic known Levenshtein Edit Distance measure as in (Levenshtein, 1966 ) (LEDM). Moreover we extend the LEDM to account for known phonological variations on the character level. We refer to this as the Biased Edit Distance Metric (BEDM). BEDM has the same exact formulation as LEDM as a metric however it is more relaxed in that it treats letters that are considered similar as if they are the same, i.e. they are not substitutions of each other. The intuition behind adding such a bias is the fact that Arabic letters may have different pronunciations depending on the context. For example, the letter might have a sound equivalent to the any of letters , , and . This is ignored by LEDM, and they are treated as different letters therefore incurring the substitution penalty. When BEDM is applied, any two letters that have the same sound are treated as a match. For example, (1) fstAn, 'dress', , and the possible variants (2) fstAn, (3) fSTAn, (4) fsTAn, and (5) fjTAn would have the following calculations: (1) and (2) would be a perfect match, i.e. a distance of 0 according to both LEDM and BEDM; (1) and (3) would be penalized for substituting S, T for s,t in (1), therefore a distance of 0.4 according to LEDM, however for BEDM s and S are considered similar to each other and so are t and T, then the distance of (1) and 3is 0. Similarily for (1) and (4) according to LEDM the distance is 0.2, but for BEDM the distance is 0. For (1) and (5) the LEDM will be 0.4 and for BEDM it will be 0.2. Hence, BEDM is a more nuanced and relaxed form of LEDM. The list of similar letters is taken from scholar seeded studies of phonological variations across different DA. The list is rendered in Table 1 . We refer to this list as sound change rules (SCR). 3 The SCR are not always symmetric, for example a v can be replaced with a S but not vice versa.",
                "cite_spans": [
                    {
                        "start": 249,
                        "end": 267,
                        "text": "(Levenshtein, 1966",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 1953,
                        "end": 1954,
                        "text": "3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1892,
                        "end": 1899,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "String Based Similarity Metrics",
                "sec_num": "4.1"
            },
            {
                "text": "We explore another relatedness measure of contextual string similarity (CSS). The intuition is that if two strings are variants of each other as they are semantically similar, they are bound to appear with similar contexts. Accordingly we model this notion via representing strings with their context co-occurrence vectors. In this framework , we represent the co-occurrence frequency of the focal string and dimensional string in all the sentences in the corpus within a window of 3 tokens. The observations are aggregated and used in the cell. If the focal string and the dimensional string never co-occur, then the cell value is set to 0. Contextual Similarity Metric (CSS) between two words is defined as a cosine similarity between their context vectors. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Contextual String Similarity",
                "sec_num": "4.2"
            },
            {
                "text": "A {, <, >, ', &, }, w, y, | ' A,{, }, <, >, |, y, &,w } A, y, &, ', {, <, >, |,w & A, y, }, ', {, <, >, |,w | A, y, ', {, <, >, } { A, y, &, ', }, <, >, | < A, ', {, >, |, } > A, ', {, <, |, } t T, v v s, t, S j q, y, $ H h, E d * , D * d, z, Z z * , Z, d s $, S, v $ s, v S s D Z, d, z, * T S, Z, t Z T, D, z, d, * E H g E, x q ', A, }, k, j k q h p, A p h, t w &, A, Y y }, A, Y Y y, A",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Letter Similar Sounding Letters",
                "sec_num": null
            },
            {
                "text": "We experimented with each of these measures in isolation and in combination. In the case of combination, we normalized the values of metrics. Table 2 illustrates the values contained in the cells of the constructed matrices in the different conditions.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 142,
                        "end": 149,
                        "text": "Table 2",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Experimental Conditions",
                "sec_num": "4.3"
            },
            {
                "text": "We have two different matrix dimension sizes depending on how extensive the feature space is. The first case is NxN, meaning that the set of words corresponding to both the rows and columns of the matrix are the focal words. The second case has all the unique words in the corpus representing the columns, making it NxM. This yields 6 isolated conditions and 8 combined conditions. \u2022 LEDM-NxM where the similarity measure is a LEDM and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Conditions",
                "sec_num": "4.3"
            },
            {
                "text": "\u2022 BEDM-NxM where the similarity measure is a BEDM and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 CSS-NxM where the similarity measure is a CSS and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-BEDM-NxN where the similarity measure is a combination of LEDM and BEDM and the matrix size is NxN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-CSS-NxN where the similarity measure is a combination of LEDM and CSS and the matrix size is NxN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 BEDM-CSS-NxN where the similarity measure is a combination of CSS and BEDM and the matrix size is NxN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-BEDM-CSS-NxN where the similarity measure is a combination of LEDM, BEDM, and CSS the matrix size is NxN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-BEDM-NxM where the similarity measure is a combination of LEDM and BEDM and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-CSS-NxM where the similarity measure is a combination of LEDM and CSS and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 BEDM-CSS-NxM where the similarity measure is a combination of CSS and BEDM and the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "\u2022 LEDM-BEDM-CSS-NxM where the similarity measure is a combination of LEDM, BEDM, and CSS the matrix size is NxM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Metric",
                "sec_num": null
            },
            {
                "text": "In order to measure the performance of this approach we need data that has the variants identified. We created such data by asking native speakers of DA to normalize variants into a standard conventionalized form in Arabic script. We targeted two dialects of Arabic: Egyptian (EGY) and Levantine (LEV). For EGY we specifically focused on Cairene Egyptian. For Levantine, we had a collection of Palestinian, Jordanian, Lebanese and Syrian Dialectal data. Most of the data is considered Syrian in our LEV collection however. Both data sets for both DA are derived from the web (Diab et al., 2010) . The data is part of a larger collection we refer to in this paper as COM-MENTDA collection. COMMENTDA comprises 3M token strings for EGY and 3M token strings for LEV. For EGY we had 2 annotators and an adjudicator, and for LEV we had 4 annotators and an adjudicator. The annotators were instructed to identify tokens that are considered incorrect orthographically according to a specific convention that we devised known as CODA (Conventionalized Orthography for Dialectal Arabic) after being trained on CODA (Habash et al., 2011) . The annotators we asked to identify three different classes of variation from the CODA convention: (a) change in spelling of a string which included dealing with speech effects such as elongations, (b) introduction of spaces or splitting a string into multiple strings, and (c ) deletion of spaces or merging strings. Many corrections included simultaneously both a spelling change and a split or merge of a string as well. It is worth noting that roughly 24% of the EGY data had changes of different types on the token level corresponding to 37% of changes to the types for EGY. For LEV, only 11% of the tokens were changed corresponding to 23% of the types that were changed. This suggests that the EGY data had a lot more variability. It was actually noted that a lot of the EGY data was not consistently EGY but rather from other DA compared to LEV that was considered relatively homogeneous. The last row in the table shows the number of cases where annotators agreed with each other on the correction. It also shows their percentages to the number of common annotations as shown by the row above it.",
                "cite_spans": [
                    {
                        "start": 575,
                        "end": 594,
                        "text": "(Diab et al., 2010)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 1106,
                        "end": 1127,
                        "text": "(Habash et al., 2011)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Data",
                "sec_num": "5"
            },
            {
                "text": "We created the gold data clusters of variants by grouping all the strings that are mapped to the same corrected CODA form. This data consisted of 290 clusters of strings in LEV with an average of 2.6 orthographic variants per cluster, and 312 string clusters in EGY with an average of 2.5 variant per cluster. All our experiments are conducted on surface forms of the strings with no preprocessing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Data",
                "sec_num": "5"
            },
            {
                "text": "In order to derive statistics to build our matrices, we use two data sets: COMMENTDA and an augmented data set (RCorpora) which is double the size of COMMENTDA for each dialect. RCorpora comprises 6M strings for EGY, and 6M strings for LEV. In the NxN matrix conditions, the size of the corpora used to derive the statistics only affects the conditions involving CSS. In the NxM conditions, the corpora sizes affect the number of matrix dimensions as well as the cell values for the CSS conditions. Application of seven metrics to four class-data size combinations gives 25 distinct runs per dialect since in NxN case, augmenting the data does not change the metrics LEDM, BEDM, and LEDM+BEDM. It has to be noted that for NxM experiments this is not the case. Table 4 gives the various statistics on the different data sizes of unique string types.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 760,
                        "end": 767,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "6"
            },
            {
                "text": "Each clustering output is compared with the gold-standard clusters using Purity and Entropy measures (Zhao and Karypis, 2001) . Every word in a given cluster in the output belongs to one or more gold clusters. These gold clusters are referred to as relevant gold clusters of the given output cluster. Purity or precision of a cluster is the fraction of its words in its relevant gold clusters. Entropy gives the measure of ambiguity in the clustering output. The larger the number of word in a relevant gold cluster, the higher the entropy value. In addition to these measures, the value of recall is also calculated. This equals the fraction of words in the relevant gold clusters that are in the given cluster. Together these three measures give a complete assessment of the quality of the output clusters. As a baseline for comparison, where strings are randomly assigned to clusters assuming the gold number of clusters per dialect. Table 5 shows the results of all the experiments described.",
                "cite_spans": [
                    {
                        "start": 101,
                        "end": 125,
                        "text": "(Zhao and Karypis, 2001)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 937,
                        "end": 944,
                        "text": "Table 5",
                        "ref_id": "TABREF7"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "6"
            },
            {
                "text": "COMMENTDA COMMENTDA+RCorpora (C+R) EGY LEV EGY LEV  NxN  729  717  729  717  NxM 205088 237598 433697  410206   Table 4 : Data Size Variations in the two dimensions conditions",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 47,
                        "end": 119,
                        "text": "LEV  NxN  729  717  729  717  NxM 205088 237598 433697  410206   Table 4",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "6"
            },
            {
                "text": "All the systems outperform the random baseline.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "The best results are presented in bold in Table 5 .",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 42,
                        "end": 49,
                        "text": "Table 5",
                        "ref_id": "TABREF7"
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "Phonological Bias From the results, it can be seen that of the individual metrics, BEDM consistently performs better than LEDM. This shows that introducing a phonological bias while matching letters does have a significant positive effect in identifying spelling variants. We believe that this effect will be more pronounced if the SCR are more tailored to the specific dialect under study. We note that CSS has the worst performance of the three individual metrics in all cases. This may be attributed to the level of processing of the data. The data is dealt with specifically on the surface level and Arabic being a very rich morphological language results in a very sparse distribution of forms. This can be mitigated by using even larger corpora. Typically in such studies that rely on distributional similarity an order of magnitude larger than what we employed is exploited. We relegate this to future work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "Combined Metrics Combination of LEDM and BEDM showed better precision, recall, and entropy than each of them in isolation in every case for both dialects. Although adding CSS has shows improvement in LEDM, mainly in EGY, it actually worsened the performance of BEDM. This is more evident when CSS+LEDM+BEDM is compared with LEDM+BEDM. Almost all the quality measures show that CSS metric adds noise.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "NxM vs. NxN Using a bigger class of words as a feature set for vector similarity significantly improves the performance of the system. This holds for all the metrics in both the dialects.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "Data Augmentation Although there are slight improvements due to increase in data, on the whole this does not seem to affect the performance of the system significantly. We only see some effect in the CSS measures which is expected.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "7"
            },
            {
                "text": "Cluster type Members Gold <n$' w<n$' An$A' LEDM w<n$ w<n$' wgnY wgnYY BEDM <n$' An$A' w<n$' LEDM+BEDM <n$' An$A' w<n$'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Error analysis",
                "sec_num": "7.1"
            },
            {
                "text": "The example above shows a comparison between LEDM and BEDM metrics. The clusters in second and third columns are the ones nearest to the gold cluster in first column. It can be observed that An$A' is not a part of the LEDM cluster since the word has more dissimilar letters. However, BEDM captures the fact that the words are phonologically similar. The example below shows a similar pattern too. LEDM differentiates ||mdh from the other words too much to identify it to be a potential variant. The next example illustrates the advantage of combining metrics. BEDM gives both higher precision and recall than LEDM when compared to the gold cluster. However, both of them do not have perfect precision or recall while the combined metric has both.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Error analysis",
                "sec_num": "7.1"
            },
            {
                "text": "Members Gold btAEtY btEty LEDM btAEtY bnt bt BEDM btAEtY btEty ty LEDM + BEDM btAEtY btEty A relaxed similarity metric does not necessarily result in higher clustering recall. In the next example, BEDM gives a lower recall than LEDM. However, the combined metric does better than the individual edit distance metrics in this example too. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Cluster type",
                "sec_num": null
            },
            {
                "text": "We compared surface and contextual metrics for identifying spelling variants in DA. We also evaluated all the combinations of those metrics. We present an initial system CODACT. Our results hold clear cross-dialectal trends, showing that string similarity metric with a phonological bias, combined with simple edit distance as a similarity metric is better for this task than raw contextual similarity when the data is limited. The next step in this approach is to refine the co-occurrence model used in our approach. Using lemma forms instead of the surface forms can yield a potential improvement since Arabic is a morphologically rich language. Eventually we plan to develop a system that will automatically normalize orthographic variations in Dialectal Arabic to the CODA convention.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "8"
            },
            {
                "text": "We use the Buckwalter Arabic Transliteration standard for the Romanized Arabic throughout the paper. www. qamus.org",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "It is worth noting that tools such as Yamli and Maren which transliterate Romanization to Arabic script serve as in interesting platform for handling the Romanization problem that could be easily leveraged.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "We are aware that this list can be further refined to reflect the specific dialect under investigation. We plan to incorporate a better customized SCR depending on the variety of DA.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "A winnow-based approach to contextsensitive spelling correction Machine Learning",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Andrew",
                        "suffix": ""
                    },
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Golding",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Roth",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "34",
                "issue": "",
                "pages": "107--130",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Golding and Roth1999] Andrew R. Golding and Dan Roth. 1999. A winnow-based approach to context- sensitive spelling correction Machine Learning, 34(1-3):107-130.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Automatic Learning of Morphological Variations for Handling Out-of-Vocabulary Terms in Urdu-English Machine Translation",
                "authors": [
                    {
                        "first": "Nizar",
                        "middle": [],
                        "last": "Habash",
                        "suffix": ""
                    },
                    {
                        "first": "Hayden",
                        "middle": [],
                        "last": "Metsky",
                        "suffix": ""
                    }
                ],
                "year": 1959,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Habash and Metsky2008] Nizar Habash and Hayden Metsky 1959. Automatic Learning of Morpho- logical Variations for Handling Out-of-Vocabulary Terms in Urdu-English Machine Translation. 8th AMTA conference, Hawaii, 21-25 October 2008",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "When Harry Met Harri: Crosslingual Name Spelling Normalization",
                "authors": [
                    {
                        "first": "Donald",
                        "middle": [
                            "E"
                        ],
                        "last": "Knuth",
                        "suffix": ""
                    }
                ],
                "year": 1973,
                "venue": "Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing",
                "volume": "3",
                "issue": "",
                "pages": "391--399",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Donald E. Knuth. The Art of Computer Programming -Volume 3: Sorting and Searching Addison-Wesley Publishing Company, 1973. [Huang et al.2008] Fei Huang , Ahmad Emami and Imed Zitouni. When Harry Met Harri: Cross- lingual Name Spelling Normalization. Proceedings of the 2008 Conference on Empirical Methods in Natural Language Processing, pages 391-399.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Matching Inconsistently Spelled Names in Automatic Speech Recognizer Output for Information Retrieval",
                "authors": [
                    {
                        "first": "Hema",
                        "middle": [],
                        "last": "Raghavan",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Allan",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "205--210",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Hema Raghavan and James Allan2005] Hema Ragha- van and James Allan. Matching Inconsistently Spelled Names in Automatic Speech Recognizer Output for Information Retrieval. Proceedings of the conference on Human Language Technology and Empirical Methods in Natural Language Processing, 2005. [Kernighan et al.1990] Mark D. Kernighan, Kenneth W. Church, and William A. Gale. 1990. A spelling cor- rection program based on a noisy channel model. Proceedings of COLING-90, pages 205-210",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Automated Methods for Processing Arabic Text: From Tokenization to Base Phrase Chunking",
                "authors": [
                    {
                        "first": "[",
                        "middle": [],
                        "last": "Diab",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Arabic Computational Morphology: Knowledge-based and Empirical Methods. [Habash and Rambow.2005] Nizar Habash and Owen Rambow. 2005. Arabic Tokenization, Morphological Analysis, and Part-of-Speech Tagging in One Fell Swoop. Proceedings of the Conference of American Association for Computational Linguistics (ACL'05)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Diab et al.2010] Mona Diab, Nizar Habash, Owen Rambow, Mohamed Al Tantawy, Yassine Bena- jiba. 2010. COLABA: Arabic Dialect Annotation and Processing.. Proceedings of the Workshop on Semitic Language Processing, LREC, May, Malta [Habash et al.2011] Nizar Habash, Mona Diab, Owen Rambow. 2011. Conventional Orthography for Dialectal Arabic (CODA) V.1.0.. Technical Re- port 137382, http://academiccommons. columbia.edu/catalog/ac:137382, Columbia University, New York, NY, USA [Diab et al.2007] Mona Diab, Kadri Hacioglu and Daniel Jurafsky. 2007. Automated Methods for Processing Arabic Text: From Tokenization to Base Phrase Chunking. In Arabic Computational Mor- phology: Knowledge-based and Empirical Methods. [Habash and Rambow.2005] Nizar Habash and Owen Rambow. 2005. Arabic Tokenization, Morpholog- ical Analysis, and Part-of-Speech Tagging in One Fell Swoop. Proceedings of the Conference of American Association for Computational Linguis- tics (ACL'05).",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Phonetic Models for Generating Spelling Variants",
                "authors": [],
                "year": 2007,
                "venue": "Spence Green and Christopher D. Manning. 2010. Better Arabic Parsing: Baselines, Evaluations, and Analysis. Proceedings of the 23rd International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Bhagat and Hovy.2007] Rahul Bhagat and Eduard Hovy. 2007. Phonetic Models for Generating Spelling Variants. Proceedings of the 20th interna- tional joint conference on Artifical intelligence. [Green and Manning.2010] Spence Green and Christo- pher D. Manning. 2010. Better Arabic Parsing: Baselines, Evaluations, and Analysis. Proceedings of the 23rd International Conference on Computa- tional Linguistics (Coling 2010).",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Simultaneous Tokenization and Part-of-Speech Tagging for Arabic without a Morphological Analyzer",
                "authors": [
                    {
                        "first": "Seth",
                        "middle": [],
                        "last": "Kulick",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the ACL 2010 Conference Short Papers",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Seth Kulick. 2010. Simultaneous To- kenization and Part-of-Speech Tagging for Arabic without a Morphological Analyzer. Proceedings of the ACL 2010 Conference Short Papers.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Binary codes capable of correcing deletions,insertions and reversals",
                "authors": [
                    {
                        "first": "V",
                        "middle": [
                            "I"
                        ],
                        "last": "Levenshtein",
                        "suffix": ""
                    }
                ],
                "year": 1966,
                "venue": "Soviet Physics Doklady",
                "volume": "10",
                "issue": "",
                "pages": "707--710",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "V. I. Levenshtein. 1966. Bi- nary codes capable of correcing deletions,insertions and reversals. Soviet Physics Doklady, Vol. 10, p.707-710.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Empirical and Theoretical Comparisons of Selected Criterion Functions for Document Clustering",
                "authors": [
                    {
                        "first": "Ying",
                        "middle": [],
                        "last": "Zhao",
                        "suffix": ""
                    },
                    {
                        "first": "George",
                        "middle": [],
                        "last": "Karypis",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Machine Learning",
                "volume": "55",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "[Zhao and Karypis2001] Ying Zhao and George Karypis. 2004. Empirical and Theoretical Compar- isons of Selected Criterion Functions for Document Clustering. Machine Learning, Volume 55 Issue 3, June 2004.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF0": {
                "type_str": "table",
                "num": null,
                "html": null,
                "text": "",
                "content": "<table/>"
            },
            "TABREF2": {
                "type_str": "table",
                "num": null,
                "html": null,
                "text": "Matrix Cell Values",
                "content": "<table><tr><td>\u2022 LEDM-NxN where the similarity measure is a LEDM and the matrix size is NxN</td></tr><tr><td>\u2022 BEDM-NxN where the similarity measure is a BEDM and the matrix size is NxN</td></tr><tr><td>\u2022 CSS-NxN where the similarity measure is a CSS and the matrix size is NxN</td></tr></table>"
            },
            "TABREF3": {
                "type_str": "table",
                "num": null,
                "html": null,
                "text": "",
                "content": "<table><tr><td>gives</td></tr></table>"
            },
            "TABREF4": {
                "type_str": "table",
                "num": null,
                "html": null,
                "text": "",
                "content": "<table/>"
            },
            "TABREF7": {
                "type_str": "table",
                "num": null,
                "html": null,
                "text": "Results",
                "content": "<table/>"
            }
        }
    }
}
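
The worked example in Section 4.1 of the parsed paper (fstAn and its variants) can be reproduced with a short sketch. The Python snippet below is a minimal illustration, not the authors' implementation: it assumes the edit distance is normalized by the length of the longer string (which matches the 0.2 and 0.4 figures quoted in the text), it includes only a small subset of the Table 1 sound change rules, and it checks the SCR in both directions even though the paper notes the rules are not always symmetric.

# Illustrative sketch (not the authors' code): normalized Levenshtein edit
# distance (LEDM) and a biased variant (BEDM) that treats similar-sounding
# Buckwalter letters as matches, following the sound change rules (SCR) idea
# described in Section 4.1. Only a small subset of Table 1 is included here.

# Subset of the SCR list from Table 1 (letter -> letters it may be replaced by).
SCR = {
    "t": {"T", "v"},
    "T": {"S", "Z", "t"},
    "s": {"$", "S", "v"},
    "S": {"s"},
    "j": {"q", "y", "$"},
}

def edit_distance(a, b, biased=False):
    """Normalized edit distance between Buckwalter strings a and b.

    With biased=True, substituting a letter for one of its SCR-similar
    letters incurs no cost (BEDM); otherwise plain Levenshtein (LEDM).
    """
    def same(x, y):
        if x == y:
            return True
        # BEDM relaxation: similar-sounding letters count as a match.
        # (The SCR are not always symmetric; both directions are checked
        # here as a simplification.)
        return biased and (y in SCR.get(x, set()) or x in SCR.get(y, set()))

    m, n = len(a), len(b)
    d = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        d[i][0] = i
    for j in range(n + 1):
        d[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if same(a[i - 1], b[j - 1]) else 1
            d[i][j] = min(d[i - 1][j] + 1,        # deletion
                          d[i][j - 1] + 1,        # insertion
                          d[i - 1][j - 1] + cost) # substitution / match
    # Normalize by the longer string so distances are comparable across pairs.
    return d[m][n] / max(m, n, 1)

if __name__ == "__main__":
    base = "fstAn"  # 'dress', example (1) from Section 4.1
    for variant in ["fstAn", "fSTAn", "fsTAn", "fjTAn"]:
        print(variant,
              "LEDM=%.1f" % edit_distance(base, variant),
              "BEDM=%.1f" % edit_distance(base, variant, biased=True))

Under these assumptions, the script prints LEDM/BEDM pairs of 0.0/0.0, 0.4/0.0, 0.2/0.0 and 0.4/0.2 for variants (2) through (5), matching the calculations given in Section 4.1.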
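
Similarly, the contextual string similarity (CSS) of Section 4.2 can be sketched as co-occurrence counting followed by a cosine comparison. This is again an illustrative sketch under stated assumptions: the window of 3 tokens is interpreted as 3 tokens on either side of the focal string, and the tokens in the demo at the bottom are made up solely to exercise the functions; the paper's actual clustering is performed with CLUTO over the full matrices.

# Illustrative sketch of contextual string similarity (CSS): each focal string
# is represented by a vector of co-occurrence counts with the dimension
# strings, aggregated over the corpus, and two strings are compared by the
# cosine of their context vectors.
import math
from collections import Counter

WINDOW = 3  # the paper uses a window of 3 tokens (assumed here to mean +/- 3)

def context_vectors(sentences, focal_words, dim_words):
    """Build sparse co-occurrence vectors for each focal word over dim_words."""
    vectors = {w: Counter() for w in focal_words}
    dims = set(dim_words)
    for tokens in sentences:
        for i, tok in enumerate(tokens):
            if tok not in vectors:
                continue
            lo, hi = max(0, i - WINDOW), min(len(tokens), i + WINDOW + 1)
            for j in range(lo, hi):
                if j != i and tokens[j] in dims:
                    vectors[tok][tokens[j]] += 1
    return vectors

def cosine(u, v):
    """Cosine similarity between two sparse count vectors (Counters)."""
    dot = sum(u[k] * v[k] for k in u if k in v)
    nu = math.sqrt(sum(x * x for x in u.values()))
    nv = math.sqrt(sum(x * x for x in v.values()))
    return dot / (nu * nv) if nu and nv else 0.0

if __name__ == "__main__":
    # Toy, made-up token sequences, used only to exercise the functions.
    sents = [["AlbnT", "qAlt", "Ally", "HSl"],
             ["AlbnT", "Alt", "Ally", "HSl"]]
    vecs = context_vectors(sents, ["qAlt", "Alt"], ["AlbnT", "Ally", "HSl"])
    print(cosine(vecs["qAlt"], vecs["Alt"]))  # 1.0: identical contexts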