{
    "paper_id": "2020",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:28:21.366442Z"
    },
    "title": "Studying the Impact of Filling Information Gaps on the Output Quality of Neural Data-to-Text",
    "authors": [
        {
            "first": "Craig",
            "middle": [],
            "last": "Thomson",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Aberdeen",
                "location": {
                    "country": "UK"
                }
            },
            "email": "c.thomson@abdn.ac.uk"
        },
        {
            "first": "Zhijie",
            "middle": [],
            "last": "Zhao",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Aberdeen",
                "location": {
                    "country": "UK"
                }
            },
            "email": ""
        },
        {
            "first": "Somayajulu",
            "middle": [],
            "last": "Gowri",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Aberdeen",
                "location": {
                    "country": "UK"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "It is unfair to expect neural data-to-text to produce high quality output when there are gaps between system input data and information contained in the training text. Thomson et al. (2020) identify and narrow information gaps in Rotowire, a popular data-to-text dataset. In this paper, we describe a study which finds that a state-of-the-art neural data-to-text system produces higher quality output, according to the information extraction (IE) based metrics, when additional input data is carefully selected from this newly available source. It remains to be shown, however, whether IE metrics used in this study correlate well with humans in judging text quality.",
    "pdf_parse": {
        "paper_id": "2020",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "It is unfair to expect neural data-to-text to produce high quality output when there are gaps between system input data and information contained in the training text. Thomson et al. (2020) identify and narrow information gaps in Rotowire, a popular data-to-text dataset. In this paper, we describe a study which finds that a state-of-the-art neural data-to-text system produces higher quality output, according to the information extraction (IE) based metrics, when additional input data is carefully selected from this newly available source. It remains to be shown, however, whether IE metrics used in this study correlate well with humans in judging text quality.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The ecological validity (de Vries et al., 2020) of data-to-text tasks requires that tasks resemble, as closely as possible, real-world problems. Only if this is the case can neural data-to-text solutions be operationally deployed with confidence. In the context of data-to-text, one of the issues with ecological validity is that most real-world tasks involve sizeable input data, with longer and more complex texts than are found in 'toy-sized' datasets. We must be able to see a path to a machine learning task which closely resembles a real-world scenario and allows us to investigate important research questions. We should aim to improve both the dataset and the task continuously. Generating summaries of basketball games from tabled data with the Rotowire dataset, as introduced by Wiseman et al. (2017a) for the English language, moves us closer to an ecologically valid data-to-text task.",
                "cite_spans": [
                    {
                        "start": 789,
                        "end": 811,
                        "text": "Wiseman et al. (2017a)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The original Rotowire dataset has been found to contain gaps between the information in the input data, and the information content of the training text. This makes the task unfair for evaluating neural data-to-text systems that are required to generate high quality output text, with hardly any factual errors. The SportSett:Basketball dataset (Thomson et al., 2020) addresses these data issues by fixing information gaps in the input data, whilst maintaining the original human-authored texts. Given that there is now at least an order of magnitude more data per game, we should consider which subset of data to train the system on, and what if any pre-processing should be preformed.",
                "cite_spans": [
                    {
                        "start": 345,
                        "end": 367,
                        "text": "(Thomson et al., 2020)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We added some of this newly available data to an existing state-of-the-art neural data-to-text system (Rebuffel et al., 2020) and found improvement across a range of metrics. We used this system since, at the time of writing, it is one of the most recent, best performing and easiest to configure. We also discuss here which types of data could be added in the future, as well as some difficulties that may be encountered in doing so.",
                "cite_spans": [
                    {
                        "start": 102,
                        "end": 125,
                        "text": "(Rebuffel et al., 2020)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Many systems have been designed and evaluated using the Rotowire dataset (Wiseman et al., 2017a; Puduppully et al., 2019a,b; Wang, 2019; Gong et al., 2019; Iso et al., 2019; Rebuffel et al., 2020) . Most of these works focus on adjusting the architecture of the system whilst using similar input data. Gong et al. (2019) is a notable exception, as their architecture change allows box score and other data from previous games in their input. This was, however, still data from the original Rotowire dataset.",
                "cite_spans": [
                    {
                        "start": 73,
                        "end": 96,
                        "text": "(Wiseman et al., 2017a;",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 97,
                        "end": 124,
                        "text": "Puduppully et al., 2019a,b;",
                        "ref_id": null
                    },
                    {
                        "start": 125,
                        "end": 136,
                        "text": "Wang, 2019;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 137,
                        "end": 155,
                        "text": "Gong et al., 2019;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 156,
                        "end": 173,
                        "text": "Iso et al., 2019;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 174,
                        "end": 196,
                        "text": "Rebuffel et al., 2020)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "Some works have attempted to better align data to text in other datasets, with techniques such as semantic control (Du\u0161ek et al., 2019) . For Rotowire, this has been investigated by Wang (2019) , which aims to prevent generation of sentences not grounded in the data. With this approach, some of the most common sentence types from the human narrative, such as the subsequent opponents of each team, are not generated. This difference is crucial when determining ecological validity of the task. The aim is to replicate the human author in the act of writing a summary of the basketball game, including its full narrative structure.",
                "cite_spans": [
                    {
                        "start": 115,
                        "end": 135,
                        "text": "(Du\u0161ek et al., 2019)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 182,
                        "end": 193,
                        "text": "Wang (2019)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "3 Information Gaps in Data-to-Text Shown in Figure 1 is an example textual summary from the Rotowire dataset. An example partial box score is shown in Table 1 . There are numerous cases where information conveyed by the text is not present in the same form in the box score or other game data. These information gaps should be investigated in order to improve the machine learning task.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 44,
                        "end": 52,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 151,
                        "end": 158,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "We performed a machine-assisted corpus analysis, using the spaCy syntactic parser (Honnibal, 2015) to group sentences which only differ by entity. We do this using an abstraction process where we replace named entities with special tokens comprised of their part-of-speech and entity label. Some manual rules are added to the parser to handle domain specific syntax. By this process the sentence (S01 from Figure 1 ) 'The Atlanta Hawks (41-9) beat the Washington Wizards (31-19) 105-96 on Wednesday.', is transformed to 'PROPN-ORG (X-Y) beat the PROPN-ORG (X-Y) X-Y on NOUN-DATE.'.",
                "cite_spans": [
                    {
                        "start": 82,
                        "end": 98,
                        "text": "(Honnibal, 2015)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 406,
                        "end": 414,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "We then count and read these abstract sentence types to find statements common to the narrative, but with attribute types not present in the data. For example, sentences with the same abstract form as S1 occur 26 times in the training corpus, with more than 800 further sentences of a similar form (using defeat instead of beat, or also including the location/stadium). It is the most common type of thing to say in the opening sentence of these summaries (which teams played, when, and where). There are, however, important attributes in these sentences which are not provided by the original Rotowire data. When generating game summaries, systems will often hallucinate these attributes as they deem it probable that such language is included in the summary, but the attribute is not available to the copy attention mechanism. In the case of our above example, the day of the week is not available in the data. The stadium in which the game was played, as well as the city and/or state within which the stadium stands, are also not available despite being common in variants of this opening sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Related Work",
                "sec_num": "2"
            },
            {
                "text": "In S02 and S11 from Figure 1 we notice that the games being discussed are not the game being summarised, they are previous or subsequent games for these teams. This is common in the training corpus as well. Handling data for previous games is complex (see subsection 3.2). However, data for the subsequent game can be easily obtained provided that a yearly partition scheme like that proposed by Thomson et al. (2020) is used. If such a partition scheme is not used, we cannot guarantee that a previous or subsequent game was not used to condition the system during training.",
                "cite_spans": [
                    {
                        "start": 396,
                        "end": 417,
                        "text": "Thomson et al. (2020)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 20,
                        "end": 28,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Missing Game Information",
                "sec_num": "3.1"
            },
            {
                "text": "We also see in S09, a mention of the conference/division structure of the league. These are known sets, which can change over time but are fixed within a season. In the NBA there are 2 conferences, each with 3 divisions of 5 teams.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Missing Game Information",
                "sec_num": "3.1"
            },
            {
                "text": "The hierarchical encoder of Rebuffel et al. (2020) takes as input a set of entities, where each entity is a set of 24 tuples 1 , and each tuple describes an attribute of that entity. An example entity would be a PLAYER, which might have attributes such as 'NAME-Kyrie Irving', 'POINTS-30', 'TEAM-Celtics', and 'REB-8'. If there are not 24 attributes of the entity then it is padded with 'NULL-NULL' tuples.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Missing Game Information",
                "sec_num": "3.1"
            },
            {
                "text": "To model attributes of the current and subsequent games, we include in our input data an additional entity of type GAME, as well as two additional entities of type NEXT-GAME. These entities were chosen because our machine-assisted corpus analysis highlighted that sentences about the game date and location, as well as those for upcoming games for each team, were common in the human-authored texts, but not supported by the original Rotowire data. The newly available data from SportSett allowed us to fill these gaps. In the two NEXT-GAME entities (one each for the two teams which are the focus of the current game summary), we include attributes for season, month, day of the week, stadium, capacity, and finally both team names plus their respective division and conference names. For the GAME entity, we include the same attributes as for NEXT-GAME, plus the attendance for the game (which was obviously not available for NEXT-GAME as those events have yet to take place).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Missing Game Information",
                "sec_num": "3.1"
            },
            {
                "text": "The Atlanta Hawks (41-9) beat the Washington Wizards (31-19) 105-96 on Wednesday.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S01:",
                "sec_num": null
            },
            {
                "text": "The Hawks bounced back after losing their first game of 2015, a 115-100 loss at the hands of the New Orleans Pelicans on Monday.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S02:",
                "sec_num": null
            },
            {
                "text": "Jeff Teague was Atlanta's top scorer against the Wizards, recording 26 points on 9-of-13 shooting from the field. Kyle Korver was kept in check with just six points in a team-high 37 minutes. S04:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S03:",
                "sec_num": null
            },
            {
                "text": "He helped get his teammates involved as he dished out six assists.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S03:",
                "sec_num": null
            },
            {
                "text": "Al Horford has put up at least 20 points and 10 rebounds in three of his last five games.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S05:",
                "sec_num": null
            },
            {
                "text": "The Wizards have now lost four straight, which is their longest losing streak of the season. S07:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "They have lost by single-digits in all four games, and are now 0-3 against Atlanta this season. S08:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "John Wall led Washington with 24 points and nine assists. Bradley Beal was coming off an 18-point, 11-rebound effort against Charlotte on Monday. S09:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "He proceeded to post 23 points in 39 minutes in this matchup of two of the Eastern Conference's top three teams. S10:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "Washington did a great job slowing down Korver, but it wasn't enough to get them the win. S11:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "Washington will take their losing streak to Charlotte on Thursday, while the Hawks will welcome the Golden State Warriors to town Friday. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "S06:",
                "sec_num": null
            },
            {
                "text": "In subsection 3.1 we described adding data which easily fit our chosen encoder. It is worth noting, however, that there is much more data available in the SportSett database, and it can be presented to our neural system in different formats. One argument is that we should take all the atomic data, along with, perhaps, its structure, then create an encoder which accepts data in that form. The atomic entities are players, which are grouped into teams, divisions, conferences and leagues. The atomic events are plays, the act of one or more players acquiring countable statistics. See Thomson et al. (2020) Section 2.1 for a more detailed analysis of entities and dimensions in this dataset. The NLG system would then be tasked with learning both the language, as well as the underlying mathematics for the statistics. Whilst we could include all atomic events as training data, this would greatly increase the size of each input sample. We could alternatively in-clude carefully selected aggregated forms of data, although creating rules to determine what should be included may be time consuming, and likely domain specific. There would also be many combinations of these derived attributes. The key question is should all possible attributes which are to be realised be available to copy attention, or, should all attributes be transformable from the atomic data? An approach combining these different types of input data could also be used.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data of Varying Forms",
                "sec_num": "3.2"
            },
            {
                "text": "Examples of aggregated forms of data could be anything from the percentage of shots which were successful for a player/team, to the average points a player has scored over an arbitrary span of games. One common inclusion in summaries is the aggregated statistic 'double-double' 2 , where players are said to have recorded double-digits in exactly two of points, rebounds, assists, blocks and steals. Mentions of previous games in the summary frequently use phrases in the form 'X of his/their last Y'. This can be seen in S05 of Figure 1 where we learn that Al Holford has scored greater than 20 points, and recorded more than 10 rebounds in exactly 3 of his last 5 games. There would be an impractical quantity of combinations for 'X out of Y' based statements, even if Y had a maximum of 5-10. Other similar aggregations, such as 'scored a combined 60 points over his last 3 games.' compound this problem.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 529,
                        "end": 537,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Data of Varying Forms",
                "sec_num": "3.2"
            },
            {
                "text": "It is unclear whether models could learn such mathematical operations since even very large and powerful models, such as GPT-3 (Brown et al., 2020) , currently only demonstrate simple addition and subtraction. This important and difficult aspect of defining the problem requires further research, but is essential if we are to create a machine learning task which is ecologically valid. The original Rotowire dataset contains a mix of countable statistics (at the game level only) and derived statistics such as percentages.",
                "cite_spans": [
                    {
                        "start": 127,
                        "end": 147,
                        "text": "(Brown et al., 2020)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data of Varying Forms",
                "sec_num": "3.2"
            },
            {
                "text": "The information extraction (IE) metrics of Relation Generation (RG), Content Selection (CS), and Content Ordering (CO), have been used extensively to compare systems operating on the Rotowire based datasets. These metrics are also key to the system design philosophy of Wiseman et al. (2017a) which aims to \"[exploit] the fact that post-hoc information extraction is significantly easier than generation itself.\" All of these metrics are based on IE models which learn to link statements in the text to tuples in the data. These models are trained on a corpus which has been automatically annotated using rules. Names or numbers in the text are linked to the possible data tuples which could represent them. During evaluation, tuples are predicted for the test text summaries, with a name, value and a type, e.g. 'Atlanta-96-TEAM-PTS'. These predicted tuples can be compared with the tuples in the data in order to determine whether facts predicted from the text match those in the data. For example, if the fact extracted from the text is 'Atlanta-96-TEAM-PTS', but in the data we see 'Atlanta-105-TEAM-PTS', then based on the match of both name and type, we can determine that the number is wrong.",
                "cite_spans": [
                    {
                        "start": 270,
                        "end": 292,
                        "text": "Wiseman et al. (2017a)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation by Information Extraction",
                "sec_num": "4"
            },
            {
                "text": "The CS metrics use these tuples to measure how many of the tuples from the predicted text exist in the gold standard text. The CO metric measures the order of these tuples. For more details see (Wiseman et al., 2017b) (we have purposefully cited the arXiv version of this paper as it includes an additional appendix detailing the procedure).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation by Information Extraction",
                "sec_num": "4"
            },
            {
                "text": "We extend the IE based metrics using the data now available in Thomson et al. (2020) . Details can be found on GitHub 3 . We make two modifications:",
                "cite_spans": [
                    {
                        "start": 63,
                        "end": 84,
                        "text": "Thomson et al. (2020)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extended IE Metrics",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Extend the annotation logic such that it can detect the additional entities and attributes we added in subsection 3.1. For example, days of the week for both the current game and the subsequent game for each team.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extended IE Metrics",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 Use a season-based partition scheme so that the IE model is not being used to evaluate data upon which it was previously conditioned. We use the 2014, 2015 and 2016 seasons to train, 2017 to validate, and 2018 to test. This is the same problem in the partition scheme which was identified for the text generation system by Thomson et al. (2020) .",
                "cite_spans": [
                    {
                        "start": 325,
                        "end": 346,
                        "text": "Thomson et al. (2020)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extended IE Metrics",
                "sec_num": "4.1"
            },
            {
                "text": "5 Experimental Setup",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Extended IE Metrics",
                "sec_num": "4.1"
            },
            {
                "text": "We created two datasets:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NLG System Setup",
                "sec_num": "5.1"
            },
            {
                "text": "D1: Where we emulated as closely as possible the data format and content used by Rebuffel et al. (2020) except using season-based partitions.",
                "cite_spans": [
                    {
                        "start": 81,
                        "end": 103,
                        "text": "Rebuffel et al. (2020)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NLG System Setup",
                "sec_num": "5.1"
            },
            {
                "text": "D2: Keeping all data from D1, but adding a new entity for the GAME, and two for the NEXT-GAME as detailed in subsection 3.1.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NLG System Setup",
                "sec_num": "5.1"
            },
            {
                "text": "We then trained models using the system of Rebuffel et al. (2020) on each dataset, with 10 different random seeds, to determine whether adding the additional information improved the results. We also tested whether changing the early-stop strategy impacted the results, taking a snapshot from training using each of the best BLEU (Papineni et al., 2002) , RG, CS-PREC (precision), CS-REC (recall), and CO. To summarise, we used two datasets, with 10 random seed each, all with 4 different early-stop strategies for 80 models total (2*10*4). We then calculated BLEU, RG, CS-PREC, CS-REC, and CO for each model.",
                "cite_spans": [
                    {
                        "start": 330,
                        "end": 353,
                        "text": "(Papineni et al., 2002)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "NLG System Setup",
                "sec_num": "5.1"
            },
            {
                "text": "We trained IE models following the general procedure proposed by (Wiseman et al., 2017b) . We trained with different random seeds and learning rates, then chose the best 3 LSTM and the best 3 Convolutional models to ensemble. We then used the model ensemble, as well as BLEU-4 (Papineni et al., 2002) , to evaluate the NLG system itself. Table 2 shows the results of our evaluation. We find a statistically significant difference (p < 0.005) for all information extraction based metrics (RG, CS-PREC, CS-REC, and CO) when we add the additional information as described in subsection 3.1. Information extraction based metrics increased in all cases when adding data, regardless of earlystopping method. Whilst BLEU scores also appeared to increase, we did not find the changes in them to be statistically significant. This is not surprising given that BLEU is known to not correlate for NLG (Reiter and Belz, 2009; Reiter, 2018) , and even in machine translation it only correlates when differences are large (Mathur et al., 2020) .",
                "cite_spans": [
                    {
                        "start": 65,
                        "end": 88,
                        "text": "(Wiseman et al., 2017b)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 277,
                        "end": 300,
                        "text": "(Papineni et al., 2002)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 890,
                        "end": 913,
                        "text": "(Reiter and Belz, 2009;",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 914,
                        "end": 927,
                        "text": "Reiter, 2018)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 1008,
                        "end": 1029,
                        "text": "(Mathur et al., 2020)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 338,
                        "end": 345,
                        "text": "Table 2",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Automated Metric Setup",
                "sec_num": "5.2"
            },
            {
                "text": "Our results show that identifying data which should be included, then modelling it within the system architecture, increased all information extraction based metrics. Existing metrics have only been evaluated in limited ways for this domain. Improved metrics could help us evaluate systems, as well as find and categorise information gaps.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "7"
            },
            {
                "text": "The subset of data selected for input, the form it takes (atomic versus aggregated), as well as the inclusion of system components/techniques (copy attention mechanism, hierarchical encoder, separate document plan, fact grounding, etc.), are all variables which could affect system performance. We plan in future work to perform ablation studies to determine which such variables, and in which combination, produce the best results. As part of this, we aim to create a unified code-base which will allow for components to be selected and configured in combination, for as many different data forms as possible.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "7"
            },
            {
                "text": "Beyond this, we hope to move away from end-toend system designs. This is similar in spirit to the idea proposed in Puduppully et al. (2019a) , where a single model is not attempting to learn everything, the document plan is learned separately. We would extend such ideas to the data itself, if we can use both the known ontology from the structured data, as well as relationships and other information extracted with NLU or other tools, then this additional information could be input to systems which realize the language, meaning they are not left to solve both data analytic and language problems with a single model. If we can define our data operations in terms of standard data models, such as relational models, then we will be closer to a general approach for filling the information gap in data-to-text.",
                "cite_spans": [
                    {
                        "start": 115,
                        "end": 140,
                        "text": "Puduppully et al. (2019a)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and Future Work",
                "sec_num": "7"
            },
            {
                "text": "This is configurable in the encoder of Rebuffel et al. (2020), although we did not change it.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "https://en.wikipedia.org/wiki/ Double-double",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "https://github.com/nlgcat/adding_data",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank our reviewers for their insightful feedback and questions.The work presented here is partially funded by the Engineering and Physical Sciences Research Council (EPSRC), which funds Craig Thomson under a National Productivity Investment Fund Doctoral Studentship (EP/R512412/1).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Semantic noise matters for neural natural language generation",
                "authors": [
                    {
                        "first": "Ond\u0159ej",
                        "middle": [],
                        "last": "Du\u0161ek",
                        "suffix": ""
                    },
                    {
                        "first": "David",
                        "middle": [
                            "M"
                        ],
                        "last": "Howcroft",
                        "suffix": ""
                    },
                    {
                        "first": "Verena",
                        "middle": [],
                        "last": "Rieser",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 12th International Conference on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "421--426",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/W19-8652"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Ond\u0159ej Du\u0161ek, David M. Howcroft, and Verena Rieser. 2019. Semantic noise matters for neural natural lan- guage generation. In Proceedings of the 12th Inter- national Conference on Natural Language Genera- tion, pages 421-426, Tokyo, Japan. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Table-to-text generation with effective hierarchical encoder on three dimensions (row, column and time)",
                "authors": [
                    {
                        "first": "Xiaocheng",
                        "middle": [],
                        "last": "Heng Gong",
                        "suffix": ""
                    },
                    {
                        "first": "Bing",
                        "middle": [],
                        "last": "Feng",
                        "suffix": ""
                    },
                    {
                        "first": "Ting",
                        "middle": [],
                        "last": "Qin",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Liu",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
                "volume": "",
                "issue": "",
                "pages": "3143--3152",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/D19-1310"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Heng Gong, Xiaocheng Feng, Bing Qin, and Ting Liu. 2019. Table-to-text generation with effective hier- archical encoder on three dimensions (row, column and time). In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Process- ing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3143-3152, Hong Kong, China. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Learning to select, track, and generate for data-to-text",
                "authors": [
                    {
                        "first": "Hayate",
                        "middle": [],
                        "last": "Iso",
                        "suffix": ""
                    },
                    {
                        "first": "Yui",
                        "middle": [],
                        "last": "Uehara",
                        "suffix": ""
                    },
                    {
                        "first": "Tatsuya",
                        "middle": [],
                        "last": "Ishigaki",
                        "suffix": ""
                    },
                    {
                        "first": "Hiroshi",
                        "middle": [],
                        "last": "Noji",
                        "suffix": ""
                    },
                    {
                        "first": "Eiji",
                        "middle": [],
                        "last": "Aramaki",
                        "suffix": ""
                    },
                    {
                        "first": "Ichiro",
                        "middle": [],
                        "last": "Kobayashi",
                        "suffix": ""
                    },
                    {
                        "first": "Yusuke",
                        "middle": [],
                        "last": "Miyao",
                        "suffix": ""
                    },
                    {
                        "first": "Naoaki",
                        "middle": [],
                        "last": "Okazaki",
                        "suffix": ""
                    },
                    {
                        "first": "Hiroya",
                        "middle": [],
                        "last": "Takamura",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "2102--2113",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/P19-1202"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Hayate Iso, Yui Uehara, Tatsuya Ishigaki, Hiroshi Noji, Eiji Aramaki, Ichiro Kobayashi, Yusuke Miyao, Naoaki Okazaki, and Hiroya Takamura. 2019. Learning to select, track, and generate for data-to-text. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2102-2113, Florence, Italy. Associa- tion for Computational Linguistics.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Tangled up in BLEU: Reevaluating the evaluation of automatic machine translation evaluation metrics",
                "authors": [
                    {
                        "first": "Nitika",
                        "middle": [],
                        "last": "Mathur",
                        "suffix": ""
                    },
                    {
                        "first": "Timothy",
                        "middle": [],
                        "last": "Baldwin",
                        "suffix": ""
                    },
                    {
                        "first": "Trevor",
                        "middle": [],
                        "last": "Cohn",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "4984--4997",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/2020.acl-main.448"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Nitika Mathur, Timothy Baldwin, and Trevor Cohn. 2020. Tangled up in BLEU: Reevaluating the eval- uation of automatic machine translation evaluation metrics. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 4984-4997, Online. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Bleu: a method for automatic evaluation of machine translation",
                "authors": [
                    {
                        "first": "Kishore",
                        "middle": [],
                        "last": "Papineni",
                        "suffix": ""
                    },
                    {
                        "first": "Salim",
                        "middle": [],
                        "last": "Roukos",
                        "suffix": ""
                    },
                    {
                        "first": "Todd",
                        "middle": [],
                        "last": "Ward",
                        "suffix": ""
                    },
                    {
                        "first": "Wei-Jing",
                        "middle": [],
                        "last": "Zhu",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "311--318",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the 40th annual meeting of the Association for Compu- tational Linguistics, pages 311-318.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Data-to-text generation with content selection and planning",
                "authors": [
                    {
                        "first": "Ratish",
                        "middle": [],
                        "last": "Puduppully",
                        "suffix": ""
                    },
                    {
                        "first": "Li",
                        "middle": [],
                        "last": "Dong",
                        "suffix": ""
                    },
                    {
                        "first": "Mirella",
                        "middle": [],
                        "last": "Lapata",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 33rd AAAI Conference on Artificial Intelligence",
                "volume": "33",
                "issue": "",
                "pages": "6908--6915",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ratish Puduppully, Li Dong, and Mirella Lapata. 2019a. Data-to-text generation with content selec- tion and planning. In Proceedings of the 33rd AAAI Conference on Artificial Intelligence, volume 33, pages 6908-6915, Honolulu, Hawaii.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Data-to-text generation with entity modeling",
                "authors": [
                    {
                        "first": "Ratish",
                        "middle": [],
                        "last": "Puduppully",
                        "suffix": ""
                    },
                    {
                        "first": "Li",
                        "middle": [],
                        "last": "Dong",
                        "suffix": ""
                    },
                    {
                        "first": "Mirella",
                        "middle": [],
                        "last": "Lapata",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "2023--2035",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/P19-1195"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Ratish Puduppully, Li Dong, and Mirella Lapata. 2019b. Data-to-text generation with entity model- ing. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2023-2035, Florence, Italy. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "A hierarchical model for data-to-text generation",
                "authors": [
                    {
                        "first": "Cl\u00e9ment",
                        "middle": [],
                        "last": "Rebuffel",
                        "suffix": ""
                    },
                    {
                        "first": "Laure",
                        "middle": [],
                        "last": "Soulier",
                        "suffix": ""
                    },
                    {
                        "first": "Geoffrey",
                        "middle": [],
                        "last": "Scoutheeten",
                        "suffix": ""
                    },
                    {
                        "first": "Patrick",
                        "middle": [],
                        "last": "Gallinari",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Advances in Information Retrieval",
                "volume": "",
                "issue": "",
                "pages": "65--80",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cl\u00e9ment Rebuffel, Laure Soulier, Geoffrey Scoutheeten, and Patrick Gallinari. 2020. A hierarchical model for data-to-text generation. In Advances in Information Retrieval, pages 65-80, Cham. Springer International Publishing.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "A structured review of the validity of BLEU",
                "authors": [
                    {
                        "first": "Ehud",
                        "middle": [],
                        "last": "Reiter",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Computational Linguistics",
                "volume": "44",
                "issue": "3",
                "pages": "393--401",
                "other_ids": {
                    "DOI": [
                        "10.1162/coli_a_00322"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Ehud Reiter. 2018. A structured review of the validity of BLEU. Computational Linguistics, 44(3):393- 401.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "An investigation into the validity of some metrics for automatically evaluating natural language generation systems",
                "authors": [
                    {
                        "first": "Ehud",
                        "middle": [],
                        "last": "Reiter",
                        "suffix": ""
                    },
                    {
                        "first": "Anja",
                        "middle": [],
                        "last": "Belz",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Computational Linguistics",
                "volume": "35",
                "issue": "4",
                "pages": "529--558",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ehud Reiter and Anja Belz. 2009. An investigation into the validity of some metrics for automatically evalu- ating natural language generation systems. Compu- tational Linguistics, 35(4):529-558.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Sportsett:basketball -a robust and maintainable dataset for natural language generation",
                "authors": [
                    {
                        "first": "Craig",
                        "middle": [],
                        "last": "Thomson",
                        "suffix": ""
                    },
                    {
                        "first": "Ehud",
                        "middle": [],
                        "last": "Reiter",
                        "suffix": ""
                    },
                    {
                        "first": "Somayajulu Gowri",
                        "middle": [],
                        "last": "Sripada",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of IntelLanG",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Craig Thomson, Ehud Reiter, and Somayajulu Gowri Sripada. 2020. Sportsett:basketball -a robust and maintainable dataset for natural language generation. In Proceedings of IntelLanG 2020.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Towards ecologically valid research on language user interfaces",
                "authors": [
                    {
                        "first": "Harm",
                        "middle": [],
                        "last": "de Vries",
                        "suffix": ""
                    },
                    {
                        "first": "Dzmitry",
                        "middle": [],
                        "last": "Bahdanau",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Harm de Vries, Dzmitry Bahdanau, and Christopher Manning. 2020. Towards ecologically valid re- search on language user interfaces.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Revisiting challenges in datato-text generation with fact grounding",
                "authors": [
                    {
                        "first": "Hongmin",
                        "middle": [],
                        "last": "Wang",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 12th International Conference on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "311--322",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/W19-8639"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Hongmin Wang. 2019. Revisiting challenges in data- to-text generation with fact grounding. In Proceed- ings of the 12th International Conference on Nat- ural Language Generation, pages 311-322, Tokyo, Japan. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Challenges in data-to-document generation",
                "authors": [
                    {
                        "first": "Sam",
                        "middle": [],
                        "last": "Wiseman",
                        "suffix": ""
                    },
                    {
                        "first": "Stuart",
                        "middle": [],
                        "last": "Shieber",
                        "suffix": ""
                    },
                    {
                        "first": "Alexander",
                        "middle": [],
                        "last": "Rush",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "2253--2263",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/D17-1239"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Sam Wiseman, Stuart Shieber, and Alexander Rush. 2017a. Challenges in data-to-document generation. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2253-2263, Copenhagen, Denmark. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Challenges in data-to-document generation",
                "authors": [
                    {
                        "first": "Sam",
                        "middle": [],
                        "last": "Wiseman",
                        "suffix": ""
                    },
                    {
                        "first": "Stuart",
                        "middle": [
                            "M"
                        ],
                        "last": "Shieber",
                        "suffix": ""
                    },
                    {
                        "first": "Alexander",
                        "middle": [
                            "M"
                        ],
                        "last": "Rush",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sam Wiseman, Stuart M. Shieber, and Alexander M. Rush. 2017b. Challenges in data-to-document gen- eration.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF1": {
                "type_str": "table",
                "html": null,
                "text": "",
                "content": "<table/>",
                "num": null
            },
            "TABREF2": {
                "type_str": "table",
                "html": null,
                "text": "\u00b1 0.386 0.70 \u00b1 0.021 0.39 \u00b1 0.015 0.38 \u00b1 0.009 0.19 \u00b1 0.006 D2 BLEU 17.39 \u00b1 1.189 0.75 \u00b1 0.034 0.43 \u00b1 0.033 0.40 \u00b1 0.019 0.21 \u00b1 0.015 D1 RG 16.97 \u00b1 0.435 0.71 \u00b1 0.016 0.38 \u00b1 0.015 0.38 \u00b1 0.009 0.18 \u00b1 0.008 D2 RG 17.00 \u00b1 1.207 0.77 \u00b1 0.029 0.42 \u00b1 0.028 0.40 \u00b1 0.013 0.21 \u00b1 0.009 D1 CS-PREC 17.08 \u00b1 0.358 0.71 \u00b1 0.018 0.39 \u00b1 0.012 0.38 \u00b1 0.009 0.19 \u00b1 0.007",
                "content": "<table><tr><td colspan=\"2\">Dataset Stopping Metric</td><td>BLEU</td><td>RG</td><td>CS-PREC</td><td>CS-REC</td><td>CO</td></tr><tr><td colspan=\"7\">D1 17.18 D2 BLEU CS-PREC 17.30 \u00b1 1.301 0.76 \u00b1 0.026 0.44 \u00b1 0.034 0.41 \u00b1 0.015 0.21 \u00b1 0.015</td></tr><tr><td>D1</td><td>CS-REC</td><td colspan=\"5\">17.12 \u00b1 0.314 0.71 \u00b1 0.017 0.39 \u00b1 0.011 0.38 \u00b1 0.007 0.19 \u00b1 0.005</td></tr><tr><td>D2</td><td>CS-REC</td><td colspan=\"5\">17.27 \u00b1 1.191 0.77 \u00b1 0.026 0.43 \u00b1 0.029 0.41 \u00b1 0.015 0.21 \u00b1 0.013</td></tr><tr><td>D1</td><td>CO</td><td colspan=\"5\">17.09 \u00b1 0.540 0.70 \u00b1 0.022 0.39 \u00b1 0.012 0.38 \u00b1 0.010 0.19 \u00b1 0.003</td></tr><tr><td>D2</td><td>CO</td><td colspan=\"5\">17.34 \u00b1 1.348 0.76 \u00b1 0.025 0.44 \u00b1 0.034 0.41 \u00b1 0.014 0.22 \u00b1 0.011</td></tr><tr><td>Gold</td><td>N/A</td><td>-</td><td>0.92</td><td>-</td><td>-</td><td>-</td></tr></table>",
                "num": null
            },
            "TABREF3": {
                "type_str": "table",
                "html": null,
                "text": "Experiment results; Comparing D1 to D2 within every cell pair is statistically significant (p < 0.005) with the exception of entries in the BLEU column. Note that BLEU, CS, and CO all inherently achieve 100% on gold standard texts.",
                "content": "<table/>",
                "num": null
            }
        }
    }
}