{
    "paper_id": "2021",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:12:38.470465Z"
    },
    "title": "Zero-Shot Cross-Lingual Transfer is a Hard Baseline to Beat in German Fine-Grained Entity Typing",
    "authors": [
        {
            "first": "Sabine",
            "middle": [],
            "last": "Weber",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Edinburgh",
                "location": {
                    "country": "UK"
                }
            },
            "email": "s.weber@sms.ed.ac.uk"
        },
        {
            "first": "Mark",
            "middle": [],
            "last": "Steedman",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Edinburgh",
                "location": {
                    "country": "UK"
                }
            },
            "email": "steedman@inf.ed.ac.uk"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "The training of NLP models often requires large amounts of labelled training data, which makes it difficult to expand existing models to new languages. While zero-shot cross-lingual transfer relies on multilingual word embeddings to apply a model trained on one language to another, Yarowsky and Ngai (2001) propose the method of annotation projection to generate training data without manual annotation. This method was successfully used for the tasks of named entity recognition and coarse-grained entity typing, but we show that it is outperformed by zero-shot cross-lingual transfer when applied to the similar task of fine-grained entity typing. In our study of fine-grained entity typing with the FIGER type ontology for German, we show that annotation projection amplifies the English model's tendency to underpredict level 2 labels and is beaten by zero-shot cross-lingual transfer on three novel test sets.",
    "pdf_parse": {
        "paper_id": "2021",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "The training of NLP models often requires large amounts of labelled training data, which makes it difficult to expand existing models to new languages. While zero-shot cross-lingual transfer relies on multilingual word embeddings to apply a model trained on one language to another, Yarowsky and Ngai (2001) propose the method of annotation projection to generate training data without manual annotation. This method was successfully used for the tasks of named entity recognition and coarse-grained entity typing, but we show that it is outperformed by zero-shot cross-lingual transfer when applied to the similar task of fine-grained entity typing. In our study of fine-grained entity typing with the FIGER type ontology for German, we show that annotation projection amplifies the English model's tendency to underpredict level 2 labels and is beaten by zero-shot cross-lingual transfer on three novel test sets.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The task of fine-grained entity typing (FET) is to assign a semantic label to a span in a text. The task is distinct from coarse-grained entity typing as done by named entity recognition systems because these systems are restricted to a small set of labels like 'person', 'organization' and 'location' which are not helpful for tasks that require more precise information about the entities. For example, FET assigns the label '/location/city' to the named entity 'Berlin' in the sentence 'From 1997 to 2000, it had a permanent exhibition in Berlin.'",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Fine-grained entity typing uses a high number of types in a multilevel hierarchy, which can be seen in the level 2 label '/location/city' (see Figure  1) . In this work we use the FIGER type hierarchy which consists of two levels with 112 types in total (37 level 1, 75 level 2). FIGER types are derived from the knowledge graph Freebase (Bollacker et al., 2008) . They are both interpretable by humans and useful in NLP applications such as relation extraction (Kuang et al., 2020) .",
                "cite_spans": [
                    {
                        "start": 338,
                        "end": 362,
                        "text": "(Bollacker et al., 2008)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 462,
                        "end": 482,
                        "text": "(Kuang et al., 2020)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 143,
                        "end": 153,
                        "text": "Figure  1)",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "There are systems for named entity recognition and coarse-grained entity typing in languages other than English (e.g. Stanza (Qi et al., 2020) ), but systems for FET with FIGER types are only available in English, due to the lack of FIGER annotated data in other languages. Because manual annotation is time consuming and expensive, various methods have been proposed to expand NLP models to other languages without additional manual annotation. The method of annotation projection (Yarowsky and Ngai, 2001) uses parallel text to automatically create annotated corpora. Annotations from the resource-rich language are transferred to the resource-poor language using word alignment between translated sentences.",
                "cite_spans": [
                    {
                        "start": 125,
                        "end": 142,
                        "text": "(Qi et al., 2020)",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 482,
                        "end": 507,
                        "text": "(Yarowsky and Ngai, 2001)",
                        "ref_id": "BIBREF17"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Annotation projection has been used successfully for the task of coarse-grained named entity typing in conjunction with named entity recognition (Agerri et al., 2018; Li et al., 2021; Ni et al., 2017) . We follow these examples by using a parallel English-German corpus, automatic named entity recognition and a state of the art English FET model (Chen et al., 2020) to assign FIGER type labels on the English side for transfer. We then project the labels onto the German half of the corpus. The output of this process is a German corpus annotated with FIGER types, which we use to train a German FET model. Another approach to the same problem is zeroshot cross-lingual transfer, in which a model built on multilingual word embeddings and trained on high-resource language data is applied to test data in a different language. Because the English FET model used in this work (Chen et al., 2020) relies on contextualised multilingual word embeddings (XLM-RoBERTa) (Conneau et al., 2019) it is possible to train it on English data and to test it on German.",
                "cite_spans": [
                    {
                        "start": 145,
                        "end": 166,
                        "text": "(Agerri et al., 2018;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 167,
                        "end": 183,
                        "text": "Li et al., 2021;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 184,
                        "end": 200,
                        "text": "Ni et al., 2017)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 347,
                        "end": 366,
                        "text": "(Chen et al., 2020)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 876,
                        "end": 895,
                        "text": "(Chen et al., 2020)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 964,
                        "end": 986,
                        "text": "(Conneau et al., 2019)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We compare the two approaches and show that the annotation projection approach amplifies the model's tendency to underpredict level 2 types, which lowers model performance. We also introduce three new test sets for German FET 1 on which zero-shot cross-lingual transfer performs better than models trained with German or a mix of German and English data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "To the best of our knowledge there is no work that compares annotation projection directly against zero-shot cross-lingual transfer. While annotation projection has been used in a variety of tasks, there has not been a study of a case where this approach fails. Authors admit that the quality of the annotating system plays role (e.g. Ehrmann et al. (2011) ; Ni et al. (2017) ), but they don't specify model properties that are necessary for the approach to work, instead focusing on ways to mitigate noise. Pires et al. (2019) ; Hsu et al. (2019) and Artetxe and Schwenk (2019) show the strengths of zeroshot cross-lingual transfer on a variety of different NLP tasks, but they do not address fine-grained entity typing. Zhao et al. (2020) conclude that zeroshot performance can be improved by choosing a small amount of high quality training data from the target language. We test their approach for the FET scenario, but arrive at unclear results.",
                "cite_spans": [
                    {
                        "start": 335,
                        "end": 356,
                        "text": "Ehrmann et al. (2011)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 359,
                        "end": 375,
                        "text": "Ni et al. (2017)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 508,
                        "end": 527,
                        "text": "Pires et al. (2019)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 530,
                        "end": 547,
                        "text": "Hsu et al. (2019)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 552,
                        "end": 578,
                        "text": "Artetxe and Schwenk (2019)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "In this work we use the hierarchical typing model of Chen et al. (2020) trained on English gold data for the zero-shot approach and also to annotate the English side of the parallel text for annotation projection. We train the model with English silver data to show the amount of noise added by automatic annotation and finally we train it with German data which was produced by annotation projection.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method",
                "sec_num": "3"
            },
            {
                "text": "In the hierarchical typing model the entity and its context are encoded using multilingual XLM-RoBERTa (Conneau et al., 2019) . For each type in the FIGER ontology the model learns a type embedding. It passes the concatenated entity and context vector through a 2-layer feed-forward network that maps into the same space as the type embedding. The score is the inner product between the transformed entity and context vector and the type embedding. For further model details refer to Chen et al. (2020) .",
                "cite_spans": [
                    {
                        "start": 103,
                        "end": 125,
                        "text": "(Conneau et al., 2019)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 484,
                        "end": 502,
                        "text": "Chen et al. (2020)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Method",
                "sec_num": "3"
            },
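The following sketch illustrates the scoring scheme just described. It is a minimal reconstruction, not the implementation of Chen et al. (2020): the module name `TypeScorer`, the mean-pooling of the entity span and its context, and the embedding dimension are assumptions made for illustration; only the XLM-RoBERTa encoding, the concatenation, the 2-layer feed-forward mapping and the inner-product score follow the description above.

```python
# Minimal sketch of the hierarchical typing scorer described above.
# Assumes PyTorch and HuggingFace transformers; illustrative only,
# not the reference implementation of Chen et al. (2020).
import torch
import torch.nn as nn
from transformers import AutoModel


class TypeScorer(nn.Module):
    def __init__(self, num_types: int, type_dim: int = 256):
        super().__init__()
        self.encoder = AutoModel.from_pretrained("xlm-roberta-base")
        hidden = self.encoder.config.hidden_size
        # One learned embedding per FIGER type (112 types in total).
        self.type_emb = nn.Embedding(num_types, type_dim)
        # 2-layer feed-forward net mapping the concatenated
        # [entity; context] vector into the type-embedding space.
        self.ffn = nn.Sequential(
            nn.Linear(2 * hidden, type_dim),
            nn.ReLU(),
            nn.Linear(type_dim, type_dim),
        )

    def forward(self, input_ids, attention_mask, entity_mask):
        # entity_mask: float mask over the entity's subword positions
        # (mean-pooling both spans is an assumption of this sketch).
        out = self.encoder(input_ids, attention_mask=attention_mask).last_hidden_state
        ent = (out * entity_mask.unsqueeze(-1)).sum(1) / entity_mask.sum(1, keepdim=True)
        ctx = (out * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(1, keepdim=True)
        mention = self.ffn(torch.cat([ent, ctx], dim=-1))
        # Score of each type = inner product with its type embedding.
        return mention @ self.type_emb.weight.T  # (batch, num_types)
```

Training this scorer would additionally require the multi-level learning-to-rank loss of Chen et al. (2020), which is out of scope for this sketch.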
            {
                "text": "Training Data To contrast the zero-shot crosslingual transfer approach with models trained on automatically annotated and projected data we use three sources of training data. We use the 2M sentences English FIGER corpus as described by (Ling and Weld, 2012) as a source of English human annotated data, which we will refer to as EN gold. The data set consists of English Wikipedia articles and we use it to train the zero-shot gold model. Second, we use English machine annotated data (EN automatic). Annotating English data using a model is the first step of the annotation projection. We use this data to train the zero-shot automatic model to examine the amount of noise added by automatic annotation. We generate EN automatic from English sentences from the Wiki-Matrix corpus Schwenk et al. (2019) 2 , using the hierarchical typing model trained on 2 M sentences EN gold. Lastly, we use German annotation projected data (DE projected) that was generated by projecting the labels from the EN automatic onto German. For the details of our annotation projection pipeline please refer to appendix A. We use this data to train the annotation projected model.",
                "cite_spans": [
                    {
                        "start": 237,
                        "end": 258,
                        "text": "(Ling and Weld, 2012)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "4"
            },
            {
                "text": "We portion each training corpus into slices of 100, 200, 300 and 400 K sentences to compare the influence of data size. For DE projected only 300 K sentences are available, because only part of the parallel sentences in the WikiMatrix corpus are of high enough quality for annotation projection. For details of the selection process refer to appendix A.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "4"
            },
            {
                "text": "An important point for our experiments is the label distribution in the training corpora (see to underpredict the finer-grained level 2 labels (e.g. /person/actor, as opposed to level 1 label /person), which leads to a different distribution of labels in EN gold and the other corpora. Compared to approximately 100 K level 2 labels per 100 K sentences in the gold data, we only see about 50 K level 2 labels in the silver data. This tendency does not depend on the different input data: If we use a model trained on 100 K EN gold to predict labels on an unseen portion of EN gold, only 25% of the resulting annotations are level 2 labels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "4"
            },
            {
                "text": "Metrics Following previous FET literature we evaluate the results of our model using strict accuracy (Acc). The strict accuracy is the ratio of instances where the predicted type set is exactly the same as the gold type set. We also evaluate per hierarchy level.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "4"
            },
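As a worked illustration of the metric (a hypothetical helper, not code from the paper), strict accuracy counts an instance as correct only when the predicted type set equals the gold type set exactly, so a missing level 2 label makes the whole instance wrong:

```python
# Strict accuracy as defined above: an instance counts as correct only
# when the predicted type set equals the gold type set exactly.
# Illustrative helper, not taken from the paper's released code.
def strict_accuracy(gold: list, pred: list) -> float:
    assert len(gold) == len(pred)
    exact = sum(1 for g, p in zip(gold, pred) if g == p)
    return exact / len(gold)


# Example: the second instance misses the level 2 label, so it does not count.
gold = [{"/location", "/location/city"}, {"/person", "/person/actor"}]
pred = [{"/location", "/location/city"}, {"/person"}]
print(strict_accuracy(gold, pred))  # 0.5
```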
            {
                "text": "Test sets We compare performance using the following test corpora: 1) a German machine translation of the test split of the English FIGER corpus (Ling and Weld, 2012) using DeepL, which was manually corrected to eliminate translation and labelling errors (DE-FIGER); 2) 500 manually annotated German sentences from the WikiMatrix corpus (DE-Wiki), which we consider to be more challenging than DE-FIGER, because it contains a wider range of type labels; 3) a small challenge set of 135 sentences taken from DE-Wiki, in which we replaced entities with close string matches to English (e.g. 'Pr\u00e4sident Nixon') with specifically German entities of the same type (e.g. 'Bundeskanzler Kohl'), which we call DE-GermEnt; and 4) for experiments where we mix German and English data, we also compare against test split of the English FIGER corpus (Ling and Weld, 2012) (EN-FIGER). Data set statistics can be seen in table 2.",
                "cite_spans": [
                    {
                        "start": 145,
                        "end": 166,
                        "text": "(Ling and Weld, 2012)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "4"
            },
            {
                "text": "Monolingual training Figure 2 automatic and annotation projected at different training data sizes on the DE-FIGER and the DE-Wiki test sets. Zero-shot gold outperforms zero-shot automatic and annotation projected on both test sets and in all training data sizes. Zero-shot gold trained on the full EN gold data set of 2 M sentences performs only 1% better on level 1 labels and 3% better on level 2 labels than a model trained with 400 K sentences, which shows that smaller data slices are sufficient to reach most of the possible performance with this data set. While for level 1 type labels annotation projected gets close to the performance of zero-shot gold on both test sets, on level 2 type labels the system falls behind zero-shot gold, with a wider gap on DE-Wiki. The comparison between zero-shot automatic and annotation projected is less clear. On the DE-Wiki test set annotation projected consistently outperforms zero-shot automatic, while on DE-FIGER both systems perform very similarly.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 21,
                        "end": 29,
                        "text": "Figure 2",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "5"
            },
            {
                "text": "The high performance of zero-shot gold and the noisier zero-shot automatic might be due to the quality of English and German embeddings in XLM-RoBERTa, as both are high resource languages from the same language family. This confirms Lauscher et al. (2020) who show that this method works especially well for close high resource language pairs and low level semantic tasks. The noise introduced by annotation projection affects level 2 label performance the most (see appendix B and table 1). But the amount of level 2 labels in the training data can not be the only reason for this. The total number of labels in the silver corpora (see table 1) shows that 200 K of silver training data contain approximately the same amount of level 2 labels as 100 K of gold data. Nevertheless, the level 2 performance of systems trained on 200 K of silver data lies behind the model trained on 100 K of EN gold. This points towards the possibility, that not only the amount of level 2 labels in the training data, but also their quality and their Multilingual training The underlying XLM-RoBERTa embeddings allow to train a model with both German and English data. For this we combine slices from DE projected with EN automatic, because these data sets have the same distribution of labels. Table 3 shows the performance of a model trained with evenly mixed data (EN+DE) in comparison with monolingually trained models of the same size tested on DE-FIGER and EN-FIGER. German performance benefits from using both German and English training data, while performance in English is best with only English data. The mixed model does not outperform zero-shot gold on these test sets.",
                "cite_spans": [
                    {
                        "start": 233,
                        "end": 255,
                        "text": "Lauscher et al. (2020)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1277,
                        "end": 1284,
                        "text": "Table 3",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "5"
            },
            {
                "text": "The low performance in the data mixing scenario compared to zero-shot gold can be explained with the distribution of labels in the silver corpora. Due to the noise added when labels are projected from English to German, the mixed model tested in German profits from the addition of higher quality English data, but not vice versa.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "5"
            },
            {
                "text": "Few-shot training Zhao et al. 2020 mance. To test this we take a model trained on 100 K sentences EN gold and fine-tune it by training on the 135 sentence manually annotated DE-GermEnt data set. We evaluate the resulting model's performance on DE-FIGER. In comparison with the model trained on 100 K EN gold only, the performance of the resulting model is 10% lower in accuracy of level 1 labels and 12% lower on level 2 labels. We did not specifically select which sentences to use like Zhao et al. 2020, which is an avenue for future work. The low performance of the few-shot model could be due to the high number of different labels, only a few of which can be observed during few-shot training, but further work is needed to confirm this. German entities To challenge zero-shot gold, we test a model trained 2 M sentences EN gold on the test set DE-GermEnt. Surprisingly, we find that the model performs better on DE-GermEnt than on its English entity counter part, with 1% higher performance on level 1 labels and 3% higher performance on level 2 labels. It is unclear why zero-shot gold behaves this way, and examining this with larger challenge data sets it an avenue for future work.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "5"
            },
            {
                "text": "Our results show that zero-shot cross-lingual transfer building upon XLM-RoBERTa is a strong baseline for the task of FET and the language pair of English and German. It outperforms annotation projection on three new test sets. We also show that in our specific scenario annotation projection using the hierarchical typing model amplifies the models tendency to underpredict level 2 types.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion and Conclusions",
                "sec_num": "6"
            },
            {
                "text": "One way to mitigate these shortcomings would be to sample level 1 and level 2 labels in a training corpus so that they have the same distribution as in the gold data, although this would not control for data quality. Another way could be to machine translate the manually annotated English corpus into German and then use annotation projection, as suggested by Ehrmann et al. (2011) . This way the label distribution of the human annotated data could be preserved as well. Lastly, improving the few-shot approach and designing more challenging test sets are other avenues to explore. ",
                "cite_spans": [
                    {
                        "start": 361,
                        "end": 382,
                        "text": "Ehrmann et al. (2011)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion and Conclusions",
                "sec_num": "6"
            },
            {
                "text": "A.1 Preprocessing A diagram of our pipeline can be seen in figure 3. To annotate the English halves of our parallel corpora with FIGER types preprocessing is necessary. Due to its automatic creation the WikiMatrix corpus contains a small amount of German sentences in its English half and English sentences in its German half, the translations of which are assigned very high confidence. We remove these by discarding the 5000 highest-confidence sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A German Data Creation",
                "sec_num": null
            },
            {
                "text": "To enable annotation by the English FET system, we run a named entity recognition system over the English input sentences (see the second box of Figure 3 ). We used the NER component of Stanza (Qi et al., 2020) for this task. We then use the English FET model to assign FIGER types to the named entities (see the third box of Figure 3 ). The FET model only annotates one entity per sentence. Sentences that contain more than one named entity occur multiple times in the English input, so that each entity receives an annotation.",
                "cite_spans": [
                    {
                        "start": 193,
                        "end": 210,
                        "text": "(Qi et al., 2020)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 145,
                        "end": 153,
                        "text": "Figure 3",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 326,
                        "end": 334,
                        "text": "Figure 3",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "A German Data Creation",
                "sec_num": null
            },
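A small sketch of this step, using Stanza's public NER API; the `expand_by_entity` helper and the output record layout are illustrative assumptions, but the duplication of a sentence once per detected entity mirrors the procedure described above.

```python
# Sketch of the preprocessing step above: run Stanza NER on each English
# sentence and emit one copy of the sentence per detected entity, since
# the FET model annotates only one entity per sentence.
import stanza

# stanza.download("en")  # one-time model download
nlp = stanza.Pipeline(lang="en", processors="tokenize,ner")


def expand_by_entity(sentences):
    for sent in sentences:
        doc = nlp(sent)
        for ent in doc.ents:  # each mention gets its own copy of the sentence
            yield {
                "sentence": sent,
                "mention": ent.text,
                "start": ent.start_char,
                "end": ent.end_char,
            }


for inst in expand_by_entity(["From 1997 to 2000, it had a permanent exhibition in Berlin."]):
    print(inst)
```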
            {
                "text": "We use ZAP to obtain a word alignment between the English and German halves of our parallel corpora (see the fourth box of Figure 3 ). While the similar tools fast_align (Dyer et al., 2013) and Giza++ (Och and Ney, 2003) are language agnostic, ZAP's model for English-German word alignment uses probabilities computed from large-scale parallel corpora. We then use our own code to project the fine grained entity type labels from the annotated English text to its German translation. We use static rules to filter out misalignments, e.g. discarding all cases where not all words of an entity were aligned. We then use the resulting German FET annotated corpus to train our FET model. Because of the ordering by alignment quality in the machine-aligned WikiMatrix corpus, we introduce a preprocessing epoch to the training to mitigate noisy input. During training the model receives the sentences in exactly the order that they occur in the corpus. In the WikiMatrix corpus the sentences are sorted by the confidence of the alignment algorithm. This means that the sentences towards the bottom of the corpus are more likely to be incorrectly aligned. Incorrectly aligned sentences are more likely to have incorrectly projected labels. Therefor the quality of FIGER type annotations in the resulting German Figure 5 : Loss rises and level 1 label accuracy deteriorates as the quality of samples gets worse towards the end of the automatically aligned and sorted corpus. The graphs show a cut-off point at approximately 300 thousand sentences. We use this information to select a high-quality slice of the corpus to train our system. data is higher towards the beginning of the corpus and lower towards its end.",
                "cite_spans": [
                    {
                        "start": 170,
                        "end": 189,
                        "text": "(Dyer et al., 2013)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 194,
                        "end": 220,
                        "text": "Giza++ (Och and Ney, 2003)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 123,
                        "end": 131,
                        "text": "Figure 3",
                        "ref_id": "FIGREF3"
                    },
                    {
                        "start": 1305,
                        "end": 1313,
                        "text": "Figure 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "A.2 Annotation projection and training with noise mitigation",
                "sec_num": null
            },
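The projection and filtering logic can be sketched as follows. This is not the authors' code: the alignment is assumed to arrive as a one-to-one mapping from English token indices to German token indices, and the contiguity check is an assumed additional rule; the "all entity tokens must be aligned" filter is the example rule named in the text.

```python
# Sketch of the projection step above: transfer FIGER labels from an
# annotated English span onto its German counterpart via a word alignment,
# discarding the instance when not every entity token is aligned
# (the static filtering rule mentioned in the text).
def project_label(en_span, labels, alignment, de_tokens):
    """en_span: range of English token indices covering the entity.
    alignment: dict mapping English token index -> German token index.
    Returns (german_mention, labels) or None if the span is misaligned."""
    de_idx = []
    for i in en_span:
        if i not in alignment:  # filter: every entity token must be aligned
            return None
        de_idx.append(alignment[i])
    de_idx = sorted(set(de_idx))
    # Filter: the projected tokens must form a contiguous German span
    # (an assumed tightening of the rules described in this appendix).
    if de_idx != list(range(de_idx[0], de_idx[-1] + 1)):
        return None
    return " ".join(de_tokens[j] for j in de_idx), labels


# Example: 'Berlin' (English token 7) aligns to German token 5.
alignment = {0: 0, 1: 1, 7: 5}
de_tokens = ["Von", "1997", "bis", "2000", "in", "Berlin", "ausgestellt", "."]
print(project_label(range(7, 8), ["/location", "/location/city"], alignment, de_tokens))
```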
            {
                "text": "During the first epoch of training this drop in quality can be observed in the change of learning rate and the accuracy of predictions after approximately 300,000 sentences (see Figure 5 ). These curves give us important information about what portion of the data is clean enough to be used in the following epochs of training. It gives us a possible cut off point for our data set at 300,000 sentences, so that in the next epochs we only train on a slice of the corpus before this point.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 178,
                        "end": 186,
                        "text": "Figure 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "A.2 Annotation projection and training with noise mitigation",
                "sec_num": null
            },
            {
                "text": "To show the effect of increasing training data size we select for our experiments 3 slices of data that were processed before the cut off point: the first 100K, 200K and 300K sentences of the corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A.2 Annotation projection and training with noise mitigation",
                "sec_num": null
            },
            {
                "text": "The hierarchical typing model of Chen et al. (2020) 's model tends to under-predict level 2 labels. This property of the model is exacerbated by our annotation projection approach, because we use data generated by this model in English to train the same model in German. Figure 4 shows the confusion matrices for level 1 and level 2 labels for EN gold 2 Mil and DE silver 200K. While for level 1 labels in EN gold there is no dominant class that labels are misclassified to, the most common misprediction for level 2 labels is to assign no label at all, which can be seen as the dotted vertical line in the second upper confusion matrix. When comparing the upper confusion matrices to the lower ones, it becomes clear that this trend to under-predict level 2 labels is even stronger in the German model. This makes sense because the German model is trained on output from the English model, which contains fewer level 2 labels than the human annotated data used to train the English model in he first place. The German model sees less level 2 labels in its training data and therefore doesn't learn to predict them.",
                "cite_spans": [
                    {
                        "start": 33,
                        "end": 51,
                        "text": "Chen et al. (2020)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 271,
                        "end": 279,
                        "text": "Figure 4",
                        "ref_id": "FIGREF4"
                    }
                ],
                "eq_spans": [],
                "section": "B Confusion matrices",
                "sec_num": null
            },
            {
                "text": "In keeping with the EMNLP reproducibility guidelines we report the specifications of the systems that our models where trained on. We trained all models using a single GeForce RTX 2080 Ti GPU. Running the largest model (EN zero-shot trained on 2 million sentences) took approximately 8 hours. Training the other models took under an hour per model. The number of model parameters is 50484362. All hyperparameters of the model were taken from the implementation of Chen et al. (2020) . For the few-shot experiment we increased the training epoch number to stop if there was no more improvement on the development set.",
                "cite_spans": [
                    {
                        "start": 464,
                        "end": 482,
                        "text": "Chen et al. (2020)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "C System and model specifications",
                "sec_num": null
            },
            {
                "text": "all test data sets and relevant code are available under https://github.com/webersab/german_FET",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "While higher quality parallel data sets are available, this is the only one in the domain of Wikipedia articles. Preliminary experiments have shown that domain is an important factor for the quality of automatic FET, which is why we chose domain consistency over data quality for our experiments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "This work was funded by the ERC H2020 Advanced Fellowship GA 742137 SEMANTAX.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": "7"
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Building named entity recognition taggers via parallel corpora",
                "authors": [
                    {
                        "first": "Rodrigo",
                        "middle": [],
                        "last": "Agerri",
                        "suffix": ""
                    },
                    {
                        "first": "Yiling",
                        "middle": [],
                        "last": "Chung",
                        "suffix": ""
                    },
                    {
                        "first": "Itziar",
                        "middle": [],
                        "last": "Aldabe",
                        "suffix": ""
                    },
                    {
                        "first": "Nora",
                        "middle": [],
                        "last": "Aranberri",
                        "suffix": ""
                    },
                    {
                        "first": "Gorka",
                        "middle": [],
                        "last": "Labaka",
                        "suffix": ""
                    },
                    {
                        "first": "German",
                        "middle": [],
                        "last": "Rigau",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rodrigo Agerri, Yiling Chung, Itziar Aldabe, Nora Aranberri, Gorka Labaka, and German Rigau. 2018. Building named entity recognition taggers via paral- lel corpora. In Proceedings of the Eleventh Interna- tional Conference on Language Resources and Eval- uation (LREC 2018).",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond",
                "authors": [
                    {
                        "first": "Mikel",
                        "middle": [],
                        "last": "Artetxe",
                        "suffix": ""
                    },
                    {
                        "first": "Holger",
                        "middle": [],
                        "last": "Schwenk",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Transactions of the Association for Computational Linguistics",
                "volume": "7",
                "issue": "",
                "pages": "597--610",
                "other_ids": {
                    "DOI": [
                        "10.1162/tacl_a_00288"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Mikel Artetxe and Holger Schwenk. 2019. Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond. Transactions of the Association for Computational Linguistics, 7:597-610.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Freebase: a collaboratively created graph database for structuring human knowledge",
                "authors": [
                    {
                        "first": "Kurt",
                        "middle": [],
                        "last": "Bollacker",
                        "suffix": ""
                    },
                    {
                        "first": "Colin",
                        "middle": [],
                        "last": "Evans",
                        "suffix": ""
                    },
                    {
                        "first": "Praveen",
                        "middle": [],
                        "last": "Paritosh",
                        "suffix": ""
                    },
                    {
                        "first": "Tim",
                        "middle": [],
                        "last": "Sturge",
                        "suffix": ""
                    },
                    {
                        "first": "Jamie",
                        "middle": [],
                        "last": "Taylor",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the 2008 ACM SIGMOD international conference on Management of data",
                "volume": "",
                "issue": "",
                "pages": "1247--1250",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kurt Bollacker, Colin Evans, Praveen Paritosh, Tim Sturge, and Jamie Taylor. 2008. Freebase: a collab- oratively created graph database for structuring hu- man knowledge. In Proceedings of the 2008 ACM SIGMOD international conference on Management of data, pages 1247-1250.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Hierarchical entity typing via multi-level learning to rank",
                "authors": [
                    {
                        "first": "Tongfei",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "Yunmo",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "Benjamin",
                        "middle": [],
                        "last": "Van Durme",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
                "volume": "2020",
                "issue": "",
                "pages": "8465--8475",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tongfei Chen, Yunmo Chen, and Benjamin Van Durme. 2020. Hierarchical entity typing via multi-level learning to rank. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics, ACL 2020, Online, July 5-10, 2020, pages 8465-8475.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Unsupervised cross-lingual representation learning at scale",
                "authors": [
                    {
                        "first": "Alexis",
                        "middle": [],
                        "last": "Conneau",
                        "suffix": ""
                    },
                    {
                        "first": "Kartikay",
                        "middle": [],
                        "last": "Khandelwal",
                        "suffix": ""
                    },
                    {
                        "first": "Naman",
                        "middle": [],
                        "last": "Goyal",
                        "suffix": ""
                    },
                    {
                        "first": "Vishrav",
                        "middle": [],
                        "last": "Chaudhary",
                        "suffix": ""
                    },
                    {
                        "first": "Guillaume",
                        "middle": [],
                        "last": "Wenzek",
                        "suffix": ""
                    },
                    {
                        "first": "Francisco",
                        "middle": [],
                        "last": "Guzm\u00e1n",
                        "suffix": ""
                    },
                    {
                        "first": "Edouard",
                        "middle": [],
                        "last": "Grave",
                        "suffix": ""
                    },
                    {
                        "first": "Myle",
                        "middle": [],
                        "last": "Ott",
                        "suffix": ""
                    },
                    {
                        "first": "Luke",
                        "middle": [],
                        "last": "Zettlemoyer",
                        "suffix": ""
                    },
                    {
                        "first": "Veselin",
                        "middle": [],
                        "last": "Stoyanov",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "arXiv": [
                        "arXiv:1911.02116"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzm\u00e1n, Edouard Grave, Myle Ott, Luke Zettle- moyer, and Veselin Stoyanov. 2019. Unsupervised cross-lingual representation learning at scale. arXiv preprint arXiv:1911.02116.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A simple, fast, and effective reparameterization of ibm model 2",
                "authors": [
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Dyer",
                        "suffix": ""
                    },
                    {
                        "first": "Victor",
                        "middle": [],
                        "last": "Chahuneau",
                        "suffix": ""
                    },
                    {
                        "first": "Noah A",
                        "middle": [],
                        "last": "Smith",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "644--648",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chris Dyer, Victor Chahuneau, and Noah A Smith. 2013. A simple, fast, and effective reparameteriza- tion of ibm model 2. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 644-648.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Building a multilingual named entityannotated corpus using annotation projection",
                "authors": [
                    {
                        "first": "Maud",
                        "middle": [],
                        "last": "Ehrmann",
                        "suffix": ""
                    },
                    {
                        "first": "Marco",
                        "middle": [],
                        "last": "Turchi",
                        "suffix": ""
                    },
                    {
                        "first": "Ralf",
                        "middle": [],
                        "last": "Steinberger",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "118--124",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Maud Ehrmann, Marco Turchi, and Ralf Steinberger. 2011. Building a multilingual named entity- annotated corpus using annotation projection. In Proceedings of the International Conference Recent Advances in Natural Language Processing 2011, pages 118-124.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Zero-shot reading comprehension by crosslingual transfer learning with multi-lingual language representation model",
                "authors": [
                    {
                        "first": "Tsung-Yuan",
                        "middle": [],
                        "last": "Hsu",
                        "suffix": ""
                    },
                    {
                        "first": "Chi-Liang",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    },
                    {
                        "first": "Hung-Yi",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
                "volume": "",
                "issue": "",
                "pages": "5933--5940",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/D19-1607"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Tsung-Yuan Hsu, Chi-Liang Liu, and Hung-yi Lee. 2019. Zero-shot reading comprehension by cross- lingual transfer learning with multi-lingual lan- guage representation model. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5933-5940, Hong Kong, China. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Improving neural relation extraction with implicit mutual relations",
                "authors": [
                    {
                        "first": "Jun",
                        "middle": [],
                        "last": "Kuang",
                        "suffix": ""
                    },
                    {
                        "first": "Yixin",
                        "middle": [],
                        "last": "Cao",
                        "suffix": ""
                    },
                    {
                        "first": "Jianbing",
                        "middle": [],
                        "last": "Zheng",
                        "suffix": ""
                    },
                    {
                        "first": "Xiangnan",
                        "middle": [],
                        "last": "He",
                        "suffix": ""
                    },
                    {
                        "first": "Ming",
                        "middle": [],
                        "last": "Gao",
                        "suffix": ""
                    },
                    {
                        "first": "Aoying",
                        "middle": [],
                        "last": "Zhou",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
                "volume": "",
                "issue": "",
                "pages": "1021--1032",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jun Kuang, Yixin Cao, Jianbing Zheng, Xiangnan He, Ming Gao, and Aoying Zhou. 2020. Improving neu- ral relation extraction with implicit mutual relations. In 2020 IEEE 36th International Conference on Data Engineering (ICDE), pages 1021-1032. IEEE.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "From zero to hero: On the limitations of zero-shot language transfer with multilingual transformers",
                "authors": [
                    {
                        "first": "Anne",
                        "middle": [],
                        "last": "Lauscher",
                        "suffix": ""
                    },
                    {
                        "first": "Vinit",
                        "middle": [],
                        "last": "Ravishankar",
                        "suffix": ""
                    },
                    {
                        "first": "Ivan",
                        "middle": [],
                        "last": "Vuli\u0107",
                        "suffix": ""
                    },
                    {
                        "first": "Goran",
                        "middle": [],
                        "last": "Glava\u0161",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
                "volume": "",
                "issue": "",
                "pages": "4483--4499",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Anne Lauscher, Vinit Ravishankar, Ivan Vuli\u0107, and Goran Glava\u0161. 2020. From zero to hero: On the lim- itations of zero-shot language transfer with multilin- gual transformers. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing (EMNLP), pages 4483-4499.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Cross-lingual named entity recognition using parallel corpus: A new approach using xlm-roberta alignment",
                "authors": [
                    {
                        "first": "Bing",
                        "middle": [],
                        "last": "Li",
                        "suffix": ""
                    },
                    {
                        "first": "Yujie",
                        "middle": [],
                        "last": "He",
                        "suffix": ""
                    },
                    {
                        "first": "Wenjin",
                        "middle": [],
                        "last": "Xu",
                        "suffix": ""
                    }
                ],
                "year": 2021,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "arXiv": [
                        "arXiv:2101.11112"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Bing Li, Yujie He, and Wenjin Xu. 2021. Cross-lingual named entity recognition using parallel corpus: A new approach using xlm-roberta alignment. arXiv preprint arXiv:2101.11112.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Fine-grained entity recognition",
                "authors": [
                    {
                        "first": "Xiao",
                        "middle": [],
                        "last": "Ling",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [
                            "S"
                        ],
                        "last": "Weld",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "AAAI",
                "volume": "12",
                "issue": "",
                "pages": "94--100",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Xiao Ling and Daniel S Weld. 2012. Fine-grained en- tity recognition. In AAAI, volume 12, pages 94-100.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Weakly supervised cross-lingual named entity recognition via effective annotation and representation projection",
                "authors": [
                    {
                        "first": "Jian",
                        "middle": [],
                        "last": "Ni",
                        "suffix": ""
                    },
                    {
                        "first": "Georgiana",
                        "middle": [],
                        "last": "Dinu",
                        "suffix": ""
                    },
                    {
                        "first": "Radu",
                        "middle": [],
                        "last": "Florian",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics",
                "volume": "1",
                "issue": "",
                "pages": "1470--1480",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jian Ni, Georgiana Dinu, and Radu Florian. 2017. Weakly supervised cross-lingual named entity recog- nition via effective annotation and representation projection. In Proceedings of the 55th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1470-1480.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "A systematic comparison of various statistical alignment models",
                "authors": [
                    {
                        "first": "Franz",
                        "middle": [
                            "Josef"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "Hermann",
                        "middle": [],
                        "last": "Ney",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Computational Linguistics",
                "volume": "29",
                "issue": "1",
                "pages": "19--51",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Franz Josef Och and Hermann Ney. 2003. A systematic comparison of various statistical alignment models. Computational Linguistics, 29(1):19-51.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "How multilingual is multilingual BERT?",
                "authors": [
                    {
                        "first": "Telmo",
                        "middle": [],
                        "last": "Pires",
                        "suffix": ""
                    },
                    {
                        "first": "Eva",
                        "middle": [],
                        "last": "Schlinger",
                        "suffix": ""
                    },
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Garrette",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "4996--5001",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/P19-1493"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Telmo Pires, Eva Schlinger, and Dan Garrette. 2019. How multilingual is multilingual BERT? In Pro- ceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4996- 5001, Florence, Italy. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Stanza: A Python natural language processing toolkit for many human languages",
                "authors": [
                    {
                        "first": "Peng",
                        "middle": [],
                        "last": "Qi",
                        "suffix": ""
                    },
                    {
                        "first": "Yuhao",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "Yuhui",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "Jason",
                        "middle": [],
                        "last": "Bolton",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [
                            "D"
                        ],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Peng Qi, Yuhao Zhang, Yuhui Zhang, Jason Bolton, and Christopher D. Manning. 2020. Stanza: A Python natural language processing toolkit for many human languages. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics: System Demonstrations.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Wikimatrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia",
                "authors": [
                    {
                        "first": "Holger",
                        "middle": [],
                        "last": "Schwenk",
                        "suffix": ""
                    },
                    {
                        "first": "Vishrav",
                        "middle": [],
                        "last": "Chaudhary",
                        "suffix": ""
                    },
                    {
                        "first": "Shuo",
                        "middle": [],
                        "last": "Sun",
                        "suffix": ""
                    },
                    {
                        "first": "Hongyu",
                        "middle": [],
                        "last": "Gong",
                        "suffix": ""
                    },
                    {
                        "first": "Francisco",
                        "middle": [],
                        "last": "Guzm\u00e1n",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "arXiv": [
                        "arXiv:1907.05791"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Holger Schwenk, Vishrav Chaudhary, Shuo Sun, Hongyu Gong, and Francisco Guzm\u00e1n. 2019. Wiki- matrix: Mining 135m parallel sentences in 1620 language pairs from wikipedia. arXiv preprint arXiv:1907.05791.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Inducing multilingual pos taggers and NP bracketers via robust projection across aligned corpora",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Yarowsky",
                        "suffix": ""
                    },
                    {
                        "first": "Grace",
                        "middle": [],
                        "last": "Ngai",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Yarowsky and Grace Ngai. 2001. Inducing mul- tilingual pos taggers and NP bracketers via robust projection across aligned corpora. In Proceedings",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "num": null,
                "text": "An example of fine-grained entity typing with the FIGER ontology. Correct types are highlighted.",
                "uris": null
            },
            "FIGREF2": {
                "type_str": "figure",
                "num": null,
                "text": "Zero-shot cross-lingual transfer performs best on both German data sets. EN automatic and DE projected perform similar on both data sets, with a wider gap in level 2 performance on DE-Wiki proportion with level 1 labels play a role here.",
                "uris": null
            },
            "FIGREF3": {
                "type_str": "figure",
                "num": null,
                "text": "Our annotation projection setup uses parallel text and an automatic named entity recognition component to generate an annotated corpus in German.of the 2nd Meeting of the North American Chapter of the Association for Computational Linguistics, pages XXX-XXX. ACL.Mengjie Zhao, Yi Zhu, Ehsan Shareghi, Roi Reichart, Anna Korhonen, and Hinrich Sch\u00fctze. 2020. A closer look at few-shot crosslingual transfer: Variance, benchmarks and baselines. arXiv preprint arXiv:2012.15682.",
                "uris": null
            },
            "FIGREF4": {
                "type_str": "figure",
                "num": null,
                "text": "The upper two matrices show level 1 and level 2 performance of EN gold 2 M, the lower two matrices show the same for DE silver 200K. While the English model has a slight tendency to predict no label for level 2 label, this tendency is stronger in the German model. The yellow vertical line shows this effect.",
                "uris": null
            },
            "TABREF0": {
                "text": ". The hierarchical typing model has the tendency EN gold EN aut. DE proj.",
                "content": "<table><tr><td>Lvl1 labels 60%</td><td>78%</td><td>77%</td></tr><tr><td>Lvl2 labels 40%</td><td>22%</td><td>23%</td></tr><tr><td colspan=\"3\">Lvl1 labels 155679 148571 150166</td></tr><tr><td colspan=\"2\">Lvl2 labels 104807 42008</td><td>43604</td></tr></table>",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF1": {
                "text": "Percentage and total numbers of level 1 and level 2 labels in 100 K sentences of the training corpora. Data created by annotation with the hierarchicaltyping model contains fewer level 2 labels than human annotated gold data.",
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF2": {
                "text": "compares the performance of the models zero-shot gold, zero-shot",
                "content": "<table><tr><td/><td>size</td><td colspan=\"2\">unique lab. t. lab.</td></tr><tr><td>EN-FIGER</td><td>563 sent.</td><td>42</td><td>624</td></tr><tr><td>DE-FIGER</td><td>563 sent.</td><td>42</td><td>624</td></tr><tr><td>DE-Wiki</td><td>500 sent.</td><td>57</td><td>771</td></tr><tr><td colspan=\"2\">DE-GermEnt 135 sent.</td><td>34</td><td>213</td></tr></table>",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF3": {
                "text": "Statistics of the different test sets listing size, number of unique labels and total number of labels. While DE-FIGER is parallel to the commonly used English FIGER test set, DE-Wiki contains more unique labels and more labels in total.",
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "html": null
            },
            "TABREF5": {
                "text": "Performance of a model trained with both English and German data, in comparison with monolingual data tested on parallel test sets. While performance in German is best with mixed data, performance in English is best with only English training data.",
                "content": "<table/>",
                "type_str": "table",
                "num": null,
                "html": null
            }
        }
    }
}