File size: 63,925 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
{
    "paper_id": "2021",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:33:51.857841Z"
    },
    "title": "SHAPELURN: An Interactive Language Learning Game with Logical Inference",
    "authors": [
        {
            "first": "Katharina",
            "middle": [],
            "last": "Stein",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Saarland University",
                "location": {
                    "country": "Germany"
                }
            },
            "email": "kstein@coli.uni-saarland.de"
        },
        {
            "first": "Leonie",
            "middle": [],
            "last": "Harter",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Saarland University",
                "location": {
                    "country": "Germany"
                }
            },
            "email": "leonieh@coli.uni-saarland.de"
        },
        {
            "first": "Luisa",
            "middle": [],
            "last": "Geiger",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Saarland University",
                "location": {
                    "country": "Germany"
                }
            },
            "email": "lgeiger@coli.uni-saarland.de"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We investigate if a model can learn natural language with minimal linguistic input through interaction. Addressing this question, we design and implement an interactive language learning game that learns logical semantic representations compositionally. Our game allows us to explore the benefits of logical inference for natural language learning. Evaluation shows that the model can accurately narrow down potential logical representations for words over the course of the game, suggesting that our model is able to learn lexical mappings from scratch successfully.",
    "pdf_parse": {
        "paper_id": "2021",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We investigate if a model can learn natural language with minimal linguistic input through interaction. Addressing this question, we design and implement an interactive language learning game that learns logical semantic representations compositionally. Our game allows us to explore the benefits of logical inference for natural language learning. Evaluation shows that the model can accurately narrow down potential logical representations for words over the course of the game, suggesting that our model is able to learn lexical mappings from scratch successfully.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "An open question in NLP research is how models can learn natural language most successfully and effectively. Many state-of-the-art semantic parsers and machine learning algorithms are dependent on large data sets for successful training. This poses a problem when using NLP applications for lowresource languages or specific domains for which only little annotated training data is available if any at all (Klie et al., 2020) . Interactive NLP Systems can overcome these problems as they start with a small or even empty set of training data that gets extended based on user feedback for the predictions the model makes based on its current parameters (Lee et al., 2020) . Therefore, learning mappings from natural language to formal representations through interaction with a user is an attractive approach for low-resource settings. The model parameters are optimized based on feedback and the resulting data itself can be used as training data for other models avoiding costly manual annotations.",
                "cite_spans": [
                    {
                        "start": 406,
                        "end": 425,
                        "text": "(Klie et al., 2020)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 652,
                        "end": 670,
                        "text": "(Lee et al., 2020)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "However, the interaction with a not yet fully trained model can get monotonous or can lead to frustration on the part of the users if they do not benefit from the interaction themselves (Lee et al., 2020) . Wang et al. (2016) present an interactive language learning setting, called SHRDLURN, in which a model learns a language by interacting with a player in a game environment, hence making the interactive learning setting more attractive and fun for users. Their model is language independent and can be taught any language from scratch.",
                "cite_spans": [
                    {
                        "start": 186,
                        "end": 204,
                        "text": "(Lee et al., 2020)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 207,
                        "end": 225,
                        "text": "Wang et al. (2016)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Here we follow Wang et al. (2016) and design and implement an interactive language learning game where a model learns to map natural language to logical forms in a compositional way based on the feedback provided by the player 1 . Whereas Wang et al. 2016's model learns to map instructions to executable logical forms, we aim to learn logical formulas that evaluate to truth values with respect to the current state of the game environment. This decision was taken because the additional information about the truth can be incorporated in the parsing and learning process in order to already restrict the potential logical formulas. Overall, we are trying to answer the following research questions: Can we implement a model that 1) can learn a natural language from scratch only from interacting with a user and 2) is not dependent on any language specific syntax and is hence language independent.",
                "cite_spans": [
                    {
                        "start": 15,
                        "end": 33,
                        "text": "Wang et al. (2016)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Several approaches map natural language to logical form for its ability to model inferences. Zettlemoyer and Collins (2005) present a learning algorithm for mapping sentences to their lambda calculus semantic representations and automatically inducing a combinatory categorical grammar (CCG). Zettlemoyer and Collins (2007) extend this algorithm to make the grammar more flexible. Pasupat and Liang (2015) also present a flexible semantic parsing framework, the floating parser, for learning mappings from natural language to logical forms in the lambda dependency-based compositional semantic language (Liang, 2013) . Liang and Potts (2015) present a framework for learning to map natural language utterances to logical forms that combines the principle of compositionality with a standard machine learning algorithm.",
                "cite_spans": [
                    {
                        "start": 293,
                        "end": 323,
                        "text": "Zettlemoyer and Collins (2007)",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 381,
                        "end": 405,
                        "text": "Pasupat and Liang (2015)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 603,
                        "end": 616,
                        "text": "(Liang, 2013)",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 619,
                        "end": 641,
                        "text": "Liang and Potts (2015)",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Current approaches that aim at overcoming the need for costful annotated training data include the interactive human-in-the-loop method where a human corrects annotations predicted by a machine learning system and this feedback is used to improve future predictions. Klie et al. (2020) use this approach for the task of Entity Linking and He et al. (2016) apply the approach to a CCG parser, thereby improving parsing performance. Goldwasser and Roth (2014) present a learning approach where a model learns to map natural language instructions to logical representations of solitaire game rules based on feedback. Finally, Zhang et al. (2018) present a game for grounded language acquisition where a human teaches an agent language from scratch in a natural language conversation.",
                "cite_spans": [
                    {
                        "start": 267,
                        "end": 285,
                        "text": "Klie et al. (2020)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 339,
                        "end": 355,
                        "text": "He et al. (2016)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 431,
                        "end": 457,
                        "text": "Goldwasser and Roth (2014)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 623,
                        "end": 642,
                        "text": "Zhang et al. (2018)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "3 From SHRDLURN to SHAPELURN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Wang et al. 2016designed their model to perform a block building task in 3-D space using natural language instructions from the player. The computer and player work together towards a shared goal (a specific block position) while only the player knows the goal and only the computer can move the blocks. The more successful the model learns the human's language, the faster this shared goal can be reached (Wang et al., 2016) .",
                "cite_spans": [
                    {
                        "start": 406,
                        "end": 425,
                        "text": "(Wang et al., 2016)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Game Design",
                "sec_num": "3.1"
            },
            {
                "text": "Based on this idea, we design a 2-D game environment where the model and player work towards teaching the model a language with the user giving descriptive input about the environment. The user is presented with a randomly generated picture displaying varying numbers of objects which have one of three shapes (circle, square, triangle) and one of four colors (red, blue, yellow, green) (see Figure  1 ). The picture corresponds to a 4 \u00d7 4-grid which is internally represented as a matrix allowing for simplified spatial calculations.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 392,
                        "end": 401,
                        "text": "Figure  1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Game Design",
                "sec_num": "3.1"
            },
            {
                "text": "The user is asked to describe one or more of the displayed objects by typing in one phrase in a language of their choice. Importantly, this language should be kept consistent in order for the computer to recognize language specific patterns. The program proceeds by parsing the input into Figure 1 : The interface displaying three randomly generated objects the user can use to build an utterance a logical formula, comparing it to the matrix and then making a guess on which object(s) the user was referring to by marking them with a black border. The user can then provide feedback in terms of selecting the right marking by skipping through the computer's guesses which show up in descending order according to their probability (see A.1). Like this, the feedback is specific enough for the model to learn lexical mappings.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 289,
                        "end": 297,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Game Design",
                "sec_num": "3.1"
            },
            {
                "text": "This process is repeated over 4 levels of alternating complexity (regarding both the number of displayed blocks and the length of the input) each consisting of 20 (level 2) or 15 (other levels) pictures. The learning algorithm is responsible for the guesses to improve as the game proceeds and lets the model adapt to the input language.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Game Design",
                "sec_num": "3.1"
            },
            {
                "text": "We tokenize, lowercase and stem the input, since learning is much quicker if it is clear that e.g. triangle and triangles refer to the same shape. In order to stem language independently, we use a cosine similarity based heuristic. To compare two tokens, we transform them into vectors with length of the longer one. If they have the same character at a position, the vectors get a 1 there. Otherwise one vector gets a 1 and the other a -1. If the cosine similarity of the vectors is > 0.65, we assume the tokens to belong to the same word (see Figure 2 ). Since Liang and Potts (2015)'s framework 2 , which we use as groundwork, employs a CYK parser that forces players to adhere to a strict syntax, we equipped it with Pasupat and Liang (2015)'s floating parser instead. This parser stores intermediate results in a chart according to their semantic category c and size s (Figure 3 ), but does not consider which indices the covered tokens span. This allows to parse syntactic structures without binding the user to a certain syntax. We adjust the three derivation rules for parsing to our grammar:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 545,
                        "end": 553,
                        "text": "Figure 2",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 874,
                        "end": 883,
                        "text": "(Figure 3",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Preprocessing and Parsing",
                "sec_num": "3.2"
            },
            {
                "text": "(1) (T okenSpan, i, j)[s] \u2192 (c, 1)[f ] (2) \u2205 \u2192 (c, 1)[f ] (3) (c1, s1)[f1] + (c2, s2)[f2] \u2192 (c, s1 + s2)[f1(f2)]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Preprocessing and Parsing",
                "sec_num": "3.2"
            },
            {
                "text": "Rule (1) is a lexical rule matching a token span from index i-j to a rule in the lexicon entry of the word in this span telling us which category c and which function f to use. Rule (2) allows us to establish lexical logical forms matching words we have not seen in the input and proceeds with these \"imaginary tokens\" as in (1). It is used to create the formula in the parse chart field (E,1) in Figure  3 . Rule (3) combines parse items of categories the grammar allows to combine. We only allow each token to be used once per parse.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 397,
                        "end": 406,
                        "text": "Figure  3",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Preprocessing and Parsing",
                "sec_num": "3.2"
            },
            {
                "text": "Previous work used beam search to address the huge number of possible parse trees (Wang et al., 2016; Pasupat and Liang, 2015). However, as beam search does not guarantee that the correct parse is kept we decided to not use a heuristic approach for pruning. Instead, we restrict the number of parses by building only formulas up to the size corresponding to the number of tokens in the input plus four. Additionally, we allow each token to be mapped to only one lexical rule per parse. This averts building non-sense constructions like (exist([2])(blue(BF(triangle,all))))(exist([1])(blue(BF(square,all)))) for the utterance \"two triangles and one blue square\" (considering the picture in Figure 1 ). ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 689,
                        "end": 697,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Preprocessing and Parsing",
                "sec_num": "3.2"
            },
            {
                "text": "For our grammar we use the same overall structure as Liang and Potts (2015) (see A.3 for the complete grammar). The main information is encoded in the lexicon which is a dictionary that pairs words with a list of corresponding lexical rules. A lexical rule is a triple of a category (B, C, E, N, POS or CONJ), a logical form and a weight. For example, the word \"red\" is paired with (C, red, w), where C is the category, red the logical form as defined by the grammar and w the current weight.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar",
                "sec_num": "3.3"
            },
            {
                "text": "The logical formulas for entries of category B and N are evaluated directly whereas the other logical forms are functions whose evaluation is specified separately using lambda calculus. For example, red is defined as the function \u03bbx(BF (red, x)) where BF (condition, list) is a function that yields all blocks from list that fulfill condition.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar",
                "sec_num": "3.3"
            },
            {
                "text": "We use a set of binary CFG rules to define which categories can be combined and how logical formulas are applied to each other to yield larger formulas, e.g. BC \u2192 C B specifies that formulas of category C and B can be combined to a formula of category BC by applying C to B.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar",
                "sec_num": "3.3"
            },
            {
                "text": "Each completed formula V is composed of at least one sub-formula of the categories B, N, E and C, and the categories POS and CONJ allow to build more complex formulas for inputs including relative positions and conjunctions. 3 Descriptions not specifying a color are handled by rule (2) of the floating parser that introduces a lexical rule of category C with an empty condition into the parse. For lower parsing complexity, users are instructed to mention only the objects, e.g. \"a circle\" instead of \"there is a circle\". We model the implicit existential quantification with a lexical rule for exists that gets introduced into each formula by rule (2).",
                "cite_spans": [
                    {
                        "start": 225,
                        "end": 226,
                        "text": "3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammar",
                "sec_num": "3.3"
            },
            {
                "text": "Like Wang et al. (2016), we aim to learn correct lexical rule(s) for all words in the lexicon. Initially, every new word gets paired with every lexical rule. Following Zettlemoyer and Collins (2005) 's approach in a simplified way, we delete the most unlikely pairs during training leaving the correct ones remain. We use Liang and Potts (2015)'s learning algorithm, a linear regression model optimized with stochastic gradient descent (SGD), which returns weight changes for word-rule pairs improving the model. Whereas Wang et al. (2016)'s features consist of n-grams and skip-grams for the utterance, tree-grams for the formulas and a formula depth, our features only contain a list of word-rule pairs. This is sufficient, since the formula's structure and depth and the distances between combinable words are handled by the floating parser.",
                "cite_spans": [
                    {
                        "start": 168,
                        "end": 198,
                        "text": "Zettlemoyer and Collins (2005)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Algorithm",
                "sec_num": "3.4"
            },
            {
                "text": "After a training round we get weight changes for all words in the input paired with the rules used to build the gold standard logical formulas and the ones predicted by the model. We sum up the weights for each pair after every training step. If a weight sum reaches the lower threshold of -0.1, we delete this rule for the corresponding word. If all pairs get weight change 0, SGD has converged, so the model is optimal for the current training batch. Hence, the word rule pairs used to build the formulas for the current training utterance must be the correct ones and all others can be deleted. If a weight sum reaches the upper threshold of 1.0, we assume this rule to be correct and delete all other rules for the word with weight sum \u2264 0.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Algorithm",
                "sec_num": "3.4"
            },
            {
                "text": "As different formulas can have the same denotation (see Figure 4) , we group all formulas evaluating to true by the guessed blocks they evaluate to. Only these guesses are then presented to the player. The formulas leading to the correct blocks, as determined by the user feedback, are used as gold standard training batch. During training we collate all possible parses. Otherwise too much information is lost, which causes deletions of correct rules. Liang and Potts (2015)'s cost function gave us too few weight changes = 0. Therefore, we average over all rules of a formula if this rule was also used in the gold formulas (value 1) or not (value 0): ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 56,
                        "end": 65,
                        "text": "Figure 4)",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "Learning Algorithm",
                "sec_num": "3.4"
            },
            {
                "text": "1 n n i=1 0 if (w i , r i ) in",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Learning Algorithm",
                "sec_num": "3.4"
            },
            {
                "text": "For a preliminary evaluation of the performance of the model we collected and analyzed data of seven participants who played the game in English (3), Spanish (1), German (2) and Esperanto (1). One of the participants completed two levels, four completed three levels, and only two completed all four levels.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "We planned to follow Wang et al. (2016) and count how often the user needs to click \"NEXT\", i.e., how far down the ranked parses the correct solution is. For our project to be viewed as successful, this number should decrease over the course of the game. Due to the simple game design, the exclusion of formulas evaluating to false and the grouping of identical markings, the total number of possible markings is very low throughout the whole game and so is the number of clicks needed to arrive at the desired one (M = 1.27). But because of our simplified game setting, this cannot be directly compared to Wang et al. (2016)'s results. Since the number of clicks hardly changes in our case, it is not a meaningful measure for evaluating the improvement of the model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "To assess performance within the model itself, we analyze the remaining rules for each word. Initially, there are 20 possible rules per word as each new word gets paired with each rule from the lexicon and ideally exactly the correct rule(s) for each word should be left finally. Figure 5 shows the average number of rules per word left after each level. As our data set is very small the results have to be taken with a grain of salt. Nevertheless, the plot reveals that the model was able to decrease the number of possible rules per word by about 15 (see A.2). Manual investigation of the remaining rules at the end of level 3 revealed a total of 1202 deletions (63.9%), out of which only seven were falsely deleted (0.58%). This indicates that our model is able to successfully exclude incorrect mappings.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 280,
                        "end": 288,
                        "text": "Figure 5",
                        "ref_id": "FIGREF4"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "4"
            },
            {
                "text": "Although the setting of our language learning game is simpler than SHRDLURN, the 2-D grid environment allows us to elicit different kinds of inputs that involve the composition of colors and shapes as well as relative spatial relations between objects that can be nested. In contrast to Wang et al. (2016), the player task in our game is not to give instructions to reach a specified goal state from the current state but to describe some part of the current state of the game (picture). Although we restrict the complexity of the descriptions in the first levels to improve the first learning phase, the player task is in general a very open task as there are many ways to describe objects in a grid and the player can freely choose the objects to describe as well as the properties used to reference them. This makes the setting particularly interactive and allows us to investigate in which ways humans choose and formulate their descriptions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "The design of the player task allows us to use the truth values of the parses in order to present only the true guesses to the player. Hence, the number of potential denotations is limited by the number of possibilities to mark different combinations of objects in the picture. This decreases the number of denotations the player can choose from compared to Wang et al. (2016) where the number of potential successor states for a current state is much higher. Although our design prevents us from using the number of clicks during the game as an evaluation measure, we see the overall low number of guesses as an advantage: the player spends less time on clicking through wrong guesses even in cases where the correct denotation is ranked very low, which can improve the playing and success experience.",
                "cite_spans": [
                    {
                        "start": 358,
                        "end": 376,
                        "text": "Wang et al. (2016)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "We find that the informativeness of feedback plays a crucial role in interactive learning. Our results indicate that making the current \"knowledge\" of the computer as explicit as possible, e.g. by marking all blocks mentioned in the input, could be a promising starting point as simple user feedback can provide enough information for learning.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "During testing we found that the learning progress depends on specific combinations of descriptions and pictures. Learning appears to benefit from situations where the correct formula for the description differs in one lexical rule from other true parses: \"a red circle\" is more informative with respect to the meaning of \"red\" for a picture that shows a red circle and a circle in another color than for a picture displaying only one (red) circle.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "The main advantage of using only input from the player to train the model is its independence of the availability of (annotated) training data as opposed to approaches requiring large data sets such as neural approaches. Hence, our approach is applicable in low-resource settings. However, our model requires a grammar that covers all semantic concepts that can be part of the interaction. Due to our game design, the number of concepts our grammar needs to address is very limited but extending the domain requires increasing the number of handwritten rules. Therefore, scaling the model to larger domains would require the costly construction of a large-scale grammar.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "Concerning the scalability of the game environment, the model could be easily adjusted to create more complex pictures as long as an adequate internal representation is found.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "A key challenge of our work was the high uncertainty of the model. The computer has no initial knowledge about language and must consider each lexicon entry for each word. Additionally, the floating parser can combine the corresponding logical formulas in any order, discard tokens from the input and add additional logical forms. The number of parses is thus huge for short sentences and grows exponentially with sentence length, vastly increasing parsing and learning times. Future work could use beam search at higher levels to handle this.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "We implement an interactive language learning game where the computer learns natural language based on user feedback. We find that learning interactively from scratch with a language independent model is complex due to the huge number of potential parses. Our results indicate that our model is able to learn language through interaction, and low-resource domains and languages could benefit from such an approach. Future work will address the trade-off between increasing flexibility and increasing processing times.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "A.1 Game Design",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Appendices",
                "sec_num": null
            },
            {
                "text": "Welcome to SHAPELURN, where you can teach the computer any language of your choice! You will be looking at different pictures and describing them to the computer in one sentence. There will be four levels with different constraints on the descriptions. Please use short sentences in the first two levels and do not use negation at all. 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "0",
                "sec_num": null
            },
            {
                "text": "Use only the shapes and/or the number of blocks for your description e.g.: 'a circle' or 'two forms' 2 You can additionally describe the blocks by color e.g: 'two blue forms' 3 Now you can describe relations between blocks and use conjunction (please don't use colors) e.g.: 'a circle under a square' 4 Describe whatever you want! Table 1 : The overall instructions for the input descriptions (0) and the level specific constraints for the descriptions. Figure 6 -8 illustrate the course of the game for an example picture and the input description \"two triangles\". The user is shown a picture and enters a description. Then the computer makes a guess and the user clicks NEXT until the correct guess is shown. over \u03bbn(\u03bbx(\u03bby(P T (y, x, n, \"o\")))) 8 under \u03bbn(\u03bbx(\u03bby(P T (y, x, n, \"u\")))) 9 next \u03bbn(\u03bbx(\u03bby(P T (y, x, n, \"n\")))) 10 lef t \u03bbn(\u03bbx(\u03bby(P T (y, x, n, \"l\")))) 11 right \u03bbn(\u03bbx(\u03bby(P T (y, x, n, \"r\")))) 12 und \u03bbv1(\u03bbv2(v1 and v2)) 13 oder \u03bbv1(\u03bbv2(v1 or v2)) 14 xoder \u03bbv1(\u03bbv2((v1 and not v2)or(v2 and not v1)))) Function BF (condition, x) returns all blocks from the list x that fulfill condition condition Function P T (y, x, n, \"pos\") returns all blocks from the list y that stand in position \"pos\" to n blocks from list x Function update guess(b) returns a list of all mentioned blocks by recursively backtracking from the blocks in list b Table 5 : The functions used to interpret the logical forms for the categories E, C, POS and CONJ.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 331,
                        "end": 338,
                        "text": "Table 1",
                        "ref_id": null
                    },
                    {
                        "start": 454,
                        "end": 462,
                        "text": "Figure 6",
                        "ref_id": null
                    },
                    {
                        "start": 1342,
                        "end": 1349,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "0",
                "sec_num": null
            },
            {
                "text": "1 V \u2192 EN BC 2 V \u2192 CONJ 1 V 3 CONJ 1 \u2192 CONJ V 4 EN \u2192 E N 5 BC \u2192 C B 6 BC \u2192 POS NB BC 7 POS NB \u2192 POS N BC 8 POS N \u2192 POS N",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "0",
                "sec_num": null
            },
            {
                "text": "Logical Form Denotation 1 B \u2192 circle BF(circle, all) the list with all blocks of the picture with shape circle 2 N \u2192 one [1] [1] 3 C \u2192 blue blue the C s.t. C(x) returns a list of all blocks of x with color blue 4 POS \u2192 over over the POS s.t. POS(y)(x)(n) returns the sublist of blocks from y that are located over n blocks from x 5 E \u2192 \u2205 exist the E s.t. E(b)(n) returns True if length of b satisfies n and False otherwise 6 BC \u2192 C B C(B) application of denotation of C to denotation of B 7 V \u2192 EN BC EN(BC) application of denotation of EN to denotation of BC 8 EN \u2192 E N E(n) application of denotation of E to denotation of n Input utterance: \"one blue square over a red triangle\" Logical Form: exist([1])(over(range(1, 17))(blue(BF (square, all)))(red(BF (triangle, all)))) Denotation: True and the list of guessed blocks consisting of the blue square and the red triangle Table 6 : Illustration of the way in which the grammar works for the example \"[there is] one blue square over a red triangle\". The upper part shows the lexical rules and the mid part the combination rules needed for the example sentence. The lower part shows the input utterance with its simplified logical form and the corresponding denotation with respect to the picture in Figure 1 in the paper.",
                "cite_spans": [
                    {
                        "start": 121,
                        "end": 124,
                        "text": "[1]",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 874,
                        "end": 881,
                        "text": "Table 6",
                        "ref_id": null
                    },
                    {
                        "start": 1250,
                        "end": 1258,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "CFG",
                "sec_num": null
            },
            {
                "text": "The complete code is available under https:// github.com/itsLuisa/SHAPELURN",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "https://github.com/cgpotts/annualreview-complearning",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The denotation of a formula V consists of the truth of the description w.r.t. the picture and the list of blocks that make the description true.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank Dr. Lucia Donatelli for the valuable discussions and support throughout the project development and writing process. Further, we would like to express our gratitude towards the participants of our evaluation experiment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            },
            {
                "text": "Lexical Rule: (category, logical form, weight)Example Word 1 (B, BF ([\u03bbb(b. shape == \"rectangle\")], all), w) \"square\" 2 (B, BF ([\u03bbb(b.shape == \"circle\")], all), w) \"circle\" 3 (B, BF ([\u03bbb(b.shape == \"triangle\")], all), w)\"three\" 9 (C, green, w) \"green\" 10 (C, blue, w)\"blue\" 11 (C, yellow, w)\"yellow\" 12 (C, red, w)\"red\" 13 (POS, over, w)\"over\" 14 (POS, under, w) \"under\" 15 (POS, next, w)\" (CONJ, und, w) \"and\" 19 (CONJ, oder, w) \"or\" 20 (CONJ, xoder, w) \"or\" 21 (C, anycol, w) \u2205 22 (E, exist, w) \u2205 / [there is] Function BF (condition, all) returns all blocks from the list of all blocks of the picture that fulfill condition condition Table 4 : The lexicon of the grammar where each lexical rule is triple of (category, logical form, weight) and English example words for each rule.",
                "cite_spans": [
                    {
                        "start": 61,
                        "end": 75,
                        "text": "(B, BF ([\u03bbb(b.",
                        "ref_id": null
                    },
                    {
                        "start": 347,
                        "end": 362,
                        "text": "(POS, under, w)",
                        "ref_id": null
                    },
                    {
                        "start": 390,
                        "end": 404,
                        "text": "(CONJ, und, w)",
                        "ref_id": null
                    },
                    {
                        "start": 414,
                        "end": 429,
                        "text": "(CONJ, oder, w)",
                        "ref_id": null
                    },
                    {
                        "start": 438,
                        "end": 454,
                        "text": "(CONJ, xoder, w)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 636,
                        "end": 643,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "annex",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Learning from natural instructions",
                "authors": [
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Goldwasser",
                        "suffix": ""
                    },
                    {
                        "first": "Dan",
                        "middle": [],
                        "last": "Roth",
                        "suffix": ""
                    }
                ],
                "year": 2014,
                "venue": "Machine learning",
                "volume": "94",
                "issue": "2",
                "pages": "205--232",
                "other_ids": {
                    "DOI": [
                        "10.1007/s10994-013-5407-y"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Dan Goldwasser and Dan Roth. 2014. Learning from natural instructions. Machine learning, 94(2):205- 232.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Human-in-the-loop parsing",
                "authors": [
                    {
                        "first": "Luheng",
                        "middle": [],
                        "last": "He",
                        "suffix": ""
                    },
                    {
                        "first": "Julian",
                        "middle": [],
                        "last": "Michael",
                        "suffix": ""
                    },
                    {
                        "first": "Mike",
                        "middle": [],
                        "last": "Lewis",
                        "suffix": ""
                    },
                    {
                        "first": "Luke",
                        "middle": [],
                        "last": "Zettlemoyer",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing",
                "volume": "",
                "issue": "",
                "pages": "2337--2342",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/D16-1258"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Luheng He, Julian Michael, Mike Lewis, and Luke Zettlemoyer. 2016. Human-in-the-loop parsing. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2337-2342, Austin, Texas. Association for Compu- tational Linguistics.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "From Zero to Hero: Human-In-The-Loop Entity Linking in Low Resource Domains",
                "authors": [
                    {
                        "first": "Jan-Christoph",
                        "middle": [],
                        "last": "Klie",
                        "suffix": ""
                    },
                    {
                        "first": "Richard",
                        "middle": [],
                        "last": "Eckart De Castilho",
                        "suffix": ""
                    },
                    {
                        "first": "Iryna",
                        "middle": [],
                        "last": "Gurevych",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "6982--6993",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/2020.acl-main.624"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Jan-Christoph Klie, Richard Eckart de Castilho, and Iryna Gurevych. 2020. From Zero to Hero: Human- In-The-Loop Entity Linking in Low Resource Do- mains. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 6982-6993, Online. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Empowering Active Learning to Jointly Optimize System and User Demands",
                "authors": [
                    {
                        "first": "Ji-Ung",
                        "middle": [],
                        "last": "Lee",
                        "suffix": ""
                    },
                    {
                        "first": "Christian",
                        "middle": [
                            "M"
                        ],
                        "last": "Meyer",
                        "suffix": ""
                    },
                    {
                        "first": "Iryna",
                        "middle": [],
                        "last": "Gurevych",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "4233--4247",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/2020.acl-main.390"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Ji-Ung Lee, Christian M. Meyer, and Iryna Gurevych. 2020. Empowering Active Learning to Jointly Op- timize System and User Demands. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4233-4247, On- line. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Lambda dependency-based compositional semantics",
                "authors": [
                    {
                        "first": "Percy",
                        "middle": [],
                        "last": "Liang",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "arXiv": [
                        "arXiv:1309.4408"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Percy Liang. 2013. Lambda dependency-based compo- sitional semantics. arXiv preprint arXiv:1309.4408.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Bringing machine learning and compositional semantics together",
                "authors": [
                    {
                        "first": "Percy",
                        "middle": [],
                        "last": "Liang",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [],
                        "last": "Potts",
                        "suffix": ""
                    }
                ],
                "year": 2015,
                "venue": "Annual Review of Linguistics",
                "volume": "1",
                "issue": "1",
                "pages": "355--376",
                "other_ids": {
                    "DOI": [
                        "10.1146/annurev-linguist-030514-125312"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Percy Liang and Christopher Potts. 2015. Bringing ma- chine learning and compositional semantics together. Annual Review of Linguistics, 1(1):355-376.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Compositional semantic parsing on semi-structured tables",
                "authors": [
                    {
                        "first": "Panupong",
                        "middle": [],
                        "last": "Pasupat",
                        "suffix": ""
                    },
                    {
                        "first": "Percy",
                        "middle": [],
                        "last": "Liang",
                        "suffix": ""
                    }
                ],
                "year": 2015,
                "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing",
                "volume": "1",
                "issue": "",
                "pages": "1470--1480",
                "other_ids": {
                    "DOI": [
                        "10.3115/v1/P15-1142"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Panupong Pasupat and Percy Liang. 2015. Compo- sitional semantic parsing on semi-structured tables. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Lan- guage Processing (Volume 1: Long Papers), pages 1470-1480, Beijing, China. Association for Compu- tational Linguistics.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Learning language games through interaction",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Sida",
                        "suffix": ""
                    },
                    {
                        "first": "Percy",
                        "middle": [],
                        "last": "Wang",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [
                            "D"
                        ],
                        "last": "Liang",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Manning",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics",
                "volume": "1",
                "issue": "",
                "pages": "2368--2378",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/P16-1224"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Sida I. Wang, Percy Liang, and Christopher D. Man- ning. 2016. Learning language games through in- teraction. In Proceedings of the 54th Annual Meet- ing of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2368-2378, Berlin, Germany. Association for Computational Linguis- tics.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Learning to map sentences to logical form: Structured classification with probabilistic categorial grammars",
                "authors": [
                    {
                        "first": "Luke",
                        "middle": [
                            "S"
                        ],
                        "last": "Zettlemoyer",
                        "suffix": ""
                    },
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence, UAI'05",
                "volume": "",
                "issue": "",
                "pages": "658--666",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2005. Learn- ing to map sentences to logical form: Structured classification with probabilistic categorial grammars. In Proceedings of the Twenty-First Conference on Uncertainty in Artificial Intelligence, UAI'05, page 658-666, Arlington, Virginia, USA. AUAI Press.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Online learning of relaxed ccg grammars for parsing to logical form",
                "authors": [
                    {
                        "first": "Luke",
                        "middle": [
                            "S"
                        ],
                        "last": "Zettlemoyer",
                        "suffix": ""
                    },
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Collins",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)",
                "volume": "",
                "issue": "",
                "pages": "678--687",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Luke S. Zettlemoyer and Michael Collins. 2007. On- line learning of relaxed ccg grammars for parsing to logical form. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 678-687.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Interactive language acquisition with one-shot visual concept learning through a conversational game",
                "authors": [
                    {
                        "first": "Haichao",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "Haonan",
                        "middle": [],
                        "last": "Yu",
                        "suffix": ""
                    },
                    {
                        "first": "Wei",
                        "middle": [],
                        "last": "Xu",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "DOI": [
                        "10.18653/v1/P18-1243"
                    ],
                    "arXiv": [
                        "arXiv:1805.00462"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Haichao Zhang, Haonan Yu, and Wei Xu. 2018. Inter- active language acquisition with one-shot visual con- cept learning through a conversational game. arXiv preprint arXiv:1805.00462.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Two stemming examples",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF1": {
                "text": "Parse Chart for \"one blue circle\"",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF2": {
                "text": "gold word rule pairs 1 otherwise n: number of tokens wi: token number i ri: rule applied to wi to get current formula gold word rule pairs: word rule pairs leading to gold formula",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF3": {
                "text": "Four formulas with the same denotation",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF4": {
                "text": "Mean number of rules per word left after each level. Error bars indicate 95% confidence intervals.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF5": {
                "text": "Grid generated by the model in level 1 First guess of the model, user clicks NEXTFigure 8: Next guess is correct, user clicks YES",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF6": {
                "text": "Form from Lexicon Function for interpretation 1 exist \u03bbn(\u03bbb(update guess(b) and len(b) in n)) 2 green \u03bbx(BF ([\u03bbb(b.color == \"green\")], x)) 3 blue \u03bbx(BF ([\u03bbb(b.color == \"blue\")], x)) 4 yellow \u03bbx(BF ([\u03bbb(b.color == \"yellow\")], x)) 5 red \u03bbx(BF ([\u03bbb(b.color == \"red\")], x)) 6 anycol \u03bbx(BF ([ ], x)) 7",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "TABREF1": {
                "text": "Mean and sd for the average number of rules per word in the lexicon at the end of each level",
                "type_str": "table",
                "num": null,
                "html": null,
                "content": "<table><tr><td>A.3 The Grammar</td></tr><tr><td>Rule</td></tr></table>"
            },
            "TABREF2": {
                "text": "The rules of the CFG grammar used to derive the input utterances and the logical forms",
                "type_str": "table",
                "num": null,
                "html": null,
                "content": "<table/>"
            }
        }
    }
}