{
    "paper_id": "2020",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T07:28:02.082725Z"
    },
    "title": "Generating Quantified Referring Expressions through Attention-Driven Incremental Perception",
    "authors": [
        {
            "first": "Gordon",
            "middle": [],
            "last": "Briggs",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Artificial Intelligence U.S. Naval Research Laboratory Washington",
                "location": {
                    "postCode": "20375",
                    "region": "DC",
                    "country": "USA"
                }
            },
            "email": "gordon.briggs@nrl.navy.mil"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We model the production of quantified referring expressions (QREs) that identity collections of visual items. A previous approach, called Perceptual Cost Pruning, modeled human QRE production using a preference-based referring expression generation algorithm, first removing facts from the input knowledge base based on a model of perceptual cost. In this paper, we present an alternative model that incrementally constructs a symbolic knowledge base through simulating human visual attention/perception from raw images. We demonstrate that this model produces the same output as Perceptual Cost Pruning. We argue that this is a more extensible approach and a step toward developing a wider range of processlevel models of human visual description.",
    "pdf_parse": {
        "paper_id": "2020",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We model the production of quantified referring expressions (QREs) that identity collections of visual items. A previous approach, called Perceptual Cost Pruning, modeled human QRE production using a preference-based referring expression generation algorithm, first removing facts from the input knowledge base based on a model of perceptual cost. In this paper, we present an alternative model that incrementally constructs a symbolic knowledge base through simulating human visual attention/perception from raw images. We demonstrate that this model produces the same output as Perceptual Cost Pruning. We argue that this is a more extensible approach and a step toward developing a wider range of processlevel models of human visual description.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Modeling the generation of human-like referring expressions in visual contexts is an ongoing challenge in the field of natural language generation (NLG). One key aspect of this challenge is the disparity between how humans and current computational approaches operate at a process-level. Humans have perceptual and cognitive limitations; they can not mentally represent visual scenes down to the exact detail of every visual object or collection of objects. Thus, people tend to selectively attend to visual scenes in order to acquire enough information to complete their task (Yarbus, 1967) .",
                "cite_spans": [
                    {
                        "start": 577,
                        "end": 591,
                        "text": "(Yarbus, 1967)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In contrast, current referring expression generation (REG) algorithms generally assume a fullyspecified, symbolic knowledge base (Van Deemter, 2016) . Essentially, these approaches abstract away the process of perception and seek to model patterns of human language primarily at the content selection phase. One task where this approach breaks down is quantified reference expression (QRE) generation. Figure 1: Examples of QRE generation task problems, based on examples from (Barr et al., 2013) . QRE tasks involve referring to collections of visual items by communicating information about the quantities contained in each collection. Initial experimental work in human-produced QREs showed regularities in responses that were not easily explained by content selection processes (Barr et al., 2013) . To illustrate this, consider the two examples of QRE problems found in Figure 1 . In Problem A, it was found that participants favored relative quantity expressions (e.g., \"the box with the most circles\") over exact number expressions (e.g., \"the box with thirty-one circles\"). In contrast, participants favored exact expressions in Problem B (Barr et al., 2013) .",
                "cite_spans": [
                    {
                        "start": 129,
                        "end": 148,
                        "text": "(Van Deemter, 2016)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 477,
                        "end": 496,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 782,
                        "end": 801,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 1147,
                        "end": 1166,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 875,
                        "end": 883,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "To account for this finding, the experimenters appealed to a principle of least perceptual effort. Determining the exact number of approximately 30 objects is effortful and time-consuming, whereas determining that there are exactly three objects is quick and requires little effort. Thus, the findings can be explained by people generating exact QREs so long as determining the exact quantity did not exceed some threshold of effort.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "This explanation was validated using a computational method called Perceptual Cost Pruning, proposed previously by (Briggs and Harner, 2019 ).",
                "cite_spans": [
                    {
                        "start": 115,
                        "end": 139,
                        "text": "(Briggs and Harner, 2019",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Perceptual Cost Pruning was able to model human QRE production by: (1) starting with a complete symbolic knowledge base representing the visual scene; (2) removing facts from the input knowledge base based on a model of the time cost of exact enumeration; (3) using a preference-based referring expression generation algorithm (i.e., the Incremental Algorithm) on this reduced knowledge base. We call this approach a destructive approach, wherein a full knowledge base is degraded to be sparser.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
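To make the contrast with the constructive model concrete, here is a minimal Python sketch of the destructive pipeline; the names perceptual_cost_pruning, cost_of, and budget are illustrative assumptions, not the published implementation.

```python
def perceptual_cost_pruning(full_kb, cost_of, budget):
    """Destructive approach: start from a complete knowledge base and drop
    facts whose modeled perceptual cost (e.g., exact-enumeration time)
    exceeds the effort budget, yielding a sparser knowledge base for REG."""
    return {fact: value for fact, value in full_kb.items()
            if cost_of(fact, value) <= budget}
```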
            {
                "text": "In this paper, we present a constructive approach to modeling the production of QREs, wherein a sparse, symbolic scene representation is built up from nothing. Specifically, we present an alternative model that incrementally builds a symbolic knowledge base through simulating human visual attention/perception from raw images. We demonstrate that this model produces the same output as perceptual cost pruning. Furthermore, we argue that this is a more extensible approach and a step toward developing a wider range of process-level models of human visual description.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Stimuli To model the QRE task we use the stimuli described by (Barr et al., 2013) . These stimuli constitute 20 QRE generation problems, each with one target collection and two distractor collections. The items in each collection are all the same with respect to size, shape, color, and all other attributes. They are also randomly distributed in their respective containers. Thus, the total quantity of items in each collection becomes a salient feature to differentiate collections, instead of individual object attributes or collection attributes like structured arrangement (e.g., shape of the group of items). While the precise quantities for each target and distractor pair were available, the original images were not. As such, we constructed a series of 120 images, six for each problem, corresponding to the different possible target and distractor configurations. Examples of these images can be found in Figure 1 .",
                "cite_spans": [
                    {
                        "start": 62,
                        "end": 81,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 915,
                        "end": 923,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "The QRE Task",
                "sec_num": "2"
            },
            {
                "text": "Output In the original study (Barr et al., 2013) , participants' QREs were annotated as following into different categories, and these annotations were refined in subsequent work (Briggs and Harner, 2019) . We use the latest annotation categories, which are: Exact Number (NUM) (e.g., \"the box with 31 dots\"); Relative quantity (REL) (e.g., \"the box with the most dots\"); and Absolute description (ABS) (e.g., \"the box with dots\"). The QRE generation algorithm presented in this paper is designed to predict which of these categories is included given a particular QRE task image.",
                "cite_spans": [
                    {
                        "start": 29,
                        "end": 48,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 179,
                        "end": 204,
                        "text": "(Briggs and Harner, 2019)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The QRE Task",
                "sec_num": "2"
            },
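As an illustration only (this enumeration is ours, not part of the original annotation tooling), the three output categories can be captured in a small data structure:

```python
from enum import Enum

class QREForm(Enum):
    """Annotation categories for quantified referring expressions."""
    NUM = "exact number"          # e.g., "the box with 31 dots"
    REL = "relative quantity"     # e.g., "the box with the most dots"
    ABS = "absolute description"  # e.g., "the box with dots"
```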
            {
                "text": "Our model of QRE generation was developed within the ARCADIA cognitive framework (Bridewell and Bello, 2016) . ARCADIA provides an ideal framework upon which to implement a model of incremental perception and quantified description for the following reasons: (1) attention and its strategic control is the central organizing feature of the system; (2) it provides the representational flexibility necessary for modeling both an approximate number system that supports group quantity estimation and an object-tracking system that supports exact enumeration (i.e., counting); (3) it aims to implement a cognitively plausible model of human visual processing; and (4) it operates in discrete cycles, allowing for modeling the time course of perceptual and cognitive processes. In the interest of space, we omit more precise technical details regarding ARCADIA models, which can be found in other work (Bridewell and Bello, 2016; Lovett et al., 2019) .",
                "cite_spans": [
                    {
                        "start": 81,
                        "end": 108,
                        "text": "(Bridewell and Bello, 2016)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 898,
                        "end": 925,
                        "text": "(Bridewell and Bello, 2016;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 926,
                        "end": 946,
                        "text": "Lovett et al., 2019)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Constructive Model of QRE Generation",
                "sec_num": "3"
            },
            {
                "text": "To produce the range of exact, relative, or absolute quantified expressions requires different forms of numerical representation at different levels of precision. Research shows that multiple forms of numerical representation underlie human number sense (Feigenson et al., 2004) . Approximate representations are obtained quickly through estimation (Barth et al., 2003) , while exact representations are obtained slowly through counting for quantities outside the subitizing range of about four items (Gelman and Gallistel, 1986) . Furthermore, evidence suggests that approximate and exact representations of quantity are obtained through different ways of deploying spatial attention (Hyde and Wood, 2011). Attention to groups results in estimation and approximate representation of quantity, while attention to individual objects underlies exact enumeration.",
                "cite_spans": [
                    {
                        "start": 254,
                        "end": 278,
                        "text": "(Feigenson et al., 2004)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 349,
                        "end": 369,
                        "text": "(Barth et al., 2003)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 501,
                        "end": 529,
                        "text": "(Gelman and Gallistel, 1986)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 685,
                        "end": 706,
                        "text": "(Hyde and Wood, 2011)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Representations",
                "sec_num": "3.1"
            },
            {
                "text": "In ARCADIA, approximate quantities are represented as Gaussians with a mean, \u00b5 = n, equal to Figure 2 : A screenshot of the ARCADIA model of QRE production in mid-run. The vertically oriented red bar in the task image indicates the current scope of spatial attention as it is being swept through the image. The Working Memory display shows the current state of the symbolic scene representation. Note that the last box has not yet been attended and encoded into working memory. Also note that no exact count was obtained for box-1, unlike box-0, as the quantity estimate indicated that it was too high effort to enumerate exactly.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 93,
                        "end": 101,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Representations",
                "sec_num": "3.1"
            },
            {
                "text": "the number of items n in a group that receives attentional focus, with a standard deviation, \u03c3 = w * n, where w is the Weber fraction associated with the simulated approximate number system. Here w = 0.13, based on empirical data (Odic et al., 2013) . Exact numbers are represented by count words, and the correct sequence of count words is represented in the system as assumed knowledge.",
                "cite_spans": [
                    {
                        "start": 230,
                        "end": 249,
                        "text": "(Odic et al., 2013)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Representations",
                "sec_num": "3.1"
            },
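A minimal sketch of this encoding, assuming an attended group of n items; the helper name estimate is ours:

```python
W = 0.13  # Weber fraction of the simulated approximate number system (Odic et al., 2013)

def estimate(n):
    """Encode an attended group of n items as a Gaussian (mu, sigma)
    with mu = n and sigma = W * n."""
    return float(n), W * n

# Attending to 31 items yields roughly N(31, 4.03): precise enough to
# separate 31 from 10, but far too coarse to separate 31 from 30.
mu, sigma = estimate(31)
```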
            {
                "text": "Below we give a high-level description of how our model operates. A screenshot of the model in operation can be found in Figure 2 . Videos of the model operating over example problems can also be found online. 1",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 121,
                        "end": 129,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Process",
                "sec_num": "3.2"
            },
            {
                "text": "The model performs a left-to-right sweep of spatial attention. For each lightly colored square box it encounters, it encodes into the symbolic scene representation in working memory the existence of a new box of items. Likewise for each of these boxes, spatial attention is then focused on the box and a subtask that encodes information about each collection within the box is initiated. The steps of this subtask are described below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
            {
                "text": "Step 1: Target/Distractor Classification: The model determines whether or not the current 1 https://osf.io/6rsg7/?view only= 034f98a2449243e28e2a593797039093 collection is the target collection by considering the background color of the box.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
            {
                "text": "Step 2: Quantity Estimation: The model then attends to the group of items within the box, resulting in an quantity estimate that is encoded and associated with the collection. This quantity estimate is encoded into working memory. Information is then encoded about whether or not items are present or absent (abs-desc) from the box.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
            {
                "text": "Step 3: Countability Judgment: Next, a number is randomly sampled from the approximate distribution encoded during the previous quantity estimation step. If this number is lower than a countability threshold value \u03c4 count , then the quantity is deemed low effort enough to exactly enumerate and the model proceeds to Step 4. Otherwise, the model determines that exact enumeration will be too high effort, and skips Step 4. Evidence from the literature on numerosity perception provides support for this notion of a countability judgment guiding enumeration strategy (Mandler and Shebo, 1982) . Furthermore, we set our countability threshold value \u03c4 count = 7 based on experimental results from (Mandler and Shebo, 1982) .",
                "cite_spans": [
                    {
                        "start": 566,
                        "end": 591,
                        "text": "(Mandler and Shebo, 1982)",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 694,
                        "end": 719,
                        "text": "(Mandler and Shebo, 1982)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
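A sketch of the countability judgment under the stated assumptions (one sample drawn from the encoded estimate; the name is_countable is ours):

```python
import random

TAU_COUNT = 7  # countability threshold, after Mandler and Shebo (1982)

def is_countable(mu, sigma):
    """Draw a single sample from the approximate quantity estimate; proceed
    to exact enumeration only if the sample falls below the threshold."""
    return random.gauss(mu, sigma) < TAU_COUNT

# A 3-item collection is almost always judged countable; a 31-item
# collection (sigma around 4) essentially never is.
```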
            {
                "text": "Step 4: Exact Enumeration: In the case when a collection is gauged to be countable, an exact count is associated with the box (num-desc). Spatial attention is then swept downwards within the collection. Individual objects are encoded into visual short term memory, and for each new object detected, the count is incremented.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
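A simplified sketch of the enumeration sweep; sorting by the vertical coordinate stands in for the downward sweep of spatial attention, and seven count words suffice here because only collections judged below the countability threshold are enumerated:

```python
COUNT_WORDS = ["one", "two", "three", "four", "five", "six", "seven"]

def enumerate_exactly(objects):
    """Sweep attention downward, encoding each new object into visual
    short-term memory and advancing one count word per object."""
    word = None
    for i, _obj in enumerate(sorted(objects, key=lambda o: o["y"])):
        word = COUNT_WORDS[i]  # assumed knowledge: the count-word sequence
    return word

assert enumerate_exactly([{"y": 40}, {"y": 12}, {"y": 25}]) == "three"
```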
            {
                "text": "Finally, after all applicable approximate and exact quantity representations are encoded for each box, the quantity information of each distractor is then compared with that of the target to ascertain information about the relative quantity of the target (rel-desc) compared with the distractors. Specifically, exact quantities are first compared if available. In the absence of exact counts, the approximate representations are compared. This is achieved by considering the Gaussian distribution that results from subtracting the Gaussian representing the distractor estimate from the Gaussian representing the target estimate. If the cumulative probability distribution of this new distribution for negative values above a threshold \u03c4 rel , this indicates that the target has a smaller quantity. Conversely, if the cumulative probability distribution for positive values is above the threshold, this indicates that the target has a larger quantity. In the scope of this paper, we set \u03c4 rel = 0.75. By comparing the target to each distractor in this manner, we determine whether the target is the collection with the most or fewest items (or neither).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Incremental Perception",
                "sec_num": "3.2.1"
            },
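A sketch of the approximate-quantity comparison. It relies only on the standard fact that the difference of two independent Gaussians is itself Gaussian (means subtracted, variances added); the function names are ours, and the exact-count path described above is omitted:

```python
import math

TAU_REL = 0.75  # threshold on cumulative probability for a relative judgment

def normal_cdf(x, mu, sigma):
    """Cumulative distribution function of N(mu, sigma) at x."""
    return 0.5 * (1.0 + math.erf((x - mu) / (sigma * math.sqrt(2.0))))

def compare(target, distractor):
    """Compare two (mu, sigma) estimates via their difference distribution."""
    (mu_t, s_t), (mu_d, s_d) = target, distractor
    mu, sigma = mu_t - mu_d, math.hypot(s_t, s_d)
    p_smaller = normal_cdf(0.0, mu, sigma)  # probability mass on negative differences
    if p_smaller > TAU_REL:
        return "fewer"
    if 1.0 - p_smaller > TAU_REL:
        return "more"
    return "neither"

# N(31, 4.03) vs. N(10, 1.3): nearly all of the mass is positive -> "more"
assert compare((31.0, 4.03), (10.0, 1.3)) == "more"
```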
            {
                "text": "Having completed the process of incrementally constructing a symbolic scene representation, the model then begins the REG process. To do this we use a modified version of the the Incremental Algorithm (Dale and Reiter, 1995) . The modification is based on the one proposed by Briggs and Harner (2019) , wherein attributes with missing values are skipped. To evaluate the model, we used the best performing preference order from (Briggs and Harner, 2019) : num-desc > abs-desc > rel-desc.",
                "cite_spans": [
                    {
                        "start": 201,
                        "end": 224,
                        "text": "(Dale and Reiter, 1995)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 276,
                        "end": 300,
                        "text": "Briggs and Harner (2019)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 428,
                        "end": 453,
                        "text": "(Briggs and Harner, 2019)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Content Selection",
                "sec_num": "3.2.2"
            },
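A compact sketch of this content-selection step; the attribute dictionaries and values below are illustrative, and the skip-on-missing behavior follows the modification just described:

```python
PREFERENCE_ORDER = ["num-desc", "abs-desc", "rel-desc"]

def select_content(target, distractors):
    """Incremental Algorithm over the constructed KB: take attributes in
    preference order, keep those that rule out distractors, and skip any
    attribute the perceptual process never encoded (value None)."""
    selected, remaining = [], list(distractors)
    for attr in PREFERENCE_ORDER:
        value = target.get(attr)
        if value is None:  # never encoded into working memory: skip
            continue
        ruled_out = [d for d in remaining if d.get(attr) != value]
        if ruled_out:
            selected.append((attr, value))
            remaining = [d for d in remaining if d.get(attr) == value]
        if not remaining:
            break
    return selected

# Problem A of Figure 1: no exact count was encoded for the large target,
# so num-desc is skipped and the relative description is selected.
target = {"num-desc": None, "abs-desc": True, "rel-desc": "most"}
distractors = [{"num-desc": 3, "abs-desc": True, "rel-desc": None},
               {"num-desc": 5, "abs-desc": True, "rel-desc": None}]
assert select_content(target, distractors) == [("rel-desc", "most")]
```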
            {
                "text": "As a proof-of-concept evaluation, we ran the model over images corresponding to the twenty unique QRE task problems modeled by Perceptual Cost Pruning by Briggs and Harner (2019) . We then compared the resulting output attribute selections to the output reported for the Perceptual Cost Pruning algorithm configured with the same attribute preference order, verifying that it was the same for all problems. The problem descriptions and corresponding model output is found in Table 1 .",
                "cite_spans": [
                    {
                        "start": 154,
                        "end": 178,
                        "text": "Briggs and Harner (2019)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 475,
                        "end": 482,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation",
                "sec_num": "3.3"
            },
            {
                "text": "People can not mentally represent complex visual scenes down to the exact detail. Evidence suggests that people strategically attend to scenes to collect enough detail to complete their task. In other words, people build up sparse scene representations. Successfully modeling human performance on the QRE task requires the ability to model what pieces of information people do or do not encode. Prior work successfully modeled QRE generation through a destructive process, in which a complete scene representation is reduced according to models of perceptual cost. Here, we have presented a constructive approach to modeling QRE generation by simulating a process of incremental scene perception.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4"
            },
            {
                "text": "What does the constructive, incremental approach give us over the destructive one? First, it provides a model that begins with raw images, rather than a symbolic knowledge base. Second, we argue that the constructive approach is more extensible when applied to a wider range of possible visual description or REG tasks. The destructive approach requires the development of a separate model of perceptual cost for each attribute that it considers, which for complex domains could necessitate quite complicated collections of models. In the constructive approach that we present, considerations of perceptual cost and the order in which facts are encoded are built into the perceptual process and how attention is strategically controlled. One example where a constructive approach involving incremental scene representation would be beneficial is in describing more complex visual scenes with grouped items. In contrast to the stimuli in the present work, which are randomly scattered collections of visual items, common visual scenes often contain items that are organized together in multiple groups. In some cases, visual grouping can reduce the perceptual cost of determining the total quantity of items. Visual grouping allows for people to attend to and enumerate each group separately, establishing the total number through mental arithmetic, instead of slowly counting each item one-by-one (Starkey and McCandliss, 2014; Ciccione and Dehaene, 2020) . Recent work indicates that when generating quantified descriptions of scenes with visual groups of the same cardinality, people have a tendency to omit descriptions of total quantity, and instead describe the number of groups and the number of items within each group (Briggs et al., 2020) . This finding is consistent with the idea of incremental scene representation and that knowledge about the number of groups and cardinality of each group precedes knowledge of total quantity.",
                "cite_spans": [
                    {
                        "start": 1397,
                        "end": 1427,
                        "text": "(Starkey and McCandliss, 2014;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 1428,
                        "end": 1455,
                        "text": "Ciccione and Dehaene, 2020)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 1726,
                        "end": 1747,
                        "text": "(Briggs et al., 2020)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Approach Advantages",
                "sec_num": "4.1"
            },
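As a simple illustration of the grouped case (our sketch, assuming each group falls within the countable range), the total can be established by enumerating the groups and combining arithmetically:

```python
def groupitized_total(groups):
    """Enumerate each group separately, then combine by mental arithmetic
    instead of counting every item one-by-one."""
    sizes = [len(g) for g in groups]
    if len(set(sizes)) == 1:           # k groups of n items each
        return len(groups) * sizes[0]  # total by multiplication: k * n
    return sum(sizes)                  # otherwise add the per-group counts

# Four groups of three items: 4 * 3 = 12, without a twelve-step count.
assert groupitized_total([["item"] * 3 for _ in range(4)]) == 12
```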
            {
                "text": "We intend to use the model presented in this paper as a basis to explore how incremental perception can explain individual variation in the forms of QREs observed in (Barr et al., 2013) . While the present model waits for the scene representation to be encoded before content generation is started, it could be modified to begin the content planning phase earlier, before the scene is fully processed.",
                "cite_spans": [
                    {
                        "start": 166,
                        "end": 185,
                        "text": "(Barr et al., 2013)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Future Work",
                "sec_num": "4.2"
            },
            {
                "text": "We predict that in certain QRE problems, the order in which distractors are perceived and compared with the target may affect the content of generated expressions. Specifically, consider Problem 16 described in Table 1 . The model predicts a QRE containing both a relative and absolute description of quantity. Examples of different expressions that fit this template are: (A) \"the one with some, but not the most\"; and (B) \"the one with fewer, but not the empty one\". Expressions similar to forms A and B were found when the human data were reex-amined (Briggs and Harner, 2019) . We predict that the order in which the two distractors are perceived would determine which expression form is more commonly produced. Form A would correspond to the empty distractor being perceived first, while form B would correspond to the empty distractor being perceived second.",
                "cite_spans": [
                    {
                        "start": 554,
                        "end": 579,
                        "text": "(Briggs and Harner, 2019)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 211,
                        "end": 218,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Future Work",
                "sec_num": "4.2"
            }
        ],
        "back_matter": [
            {
                "text": "This work was supported by an NRL Karles Fellowship awarded to author and AFOSR MIPR grant F4FGA07074G001. The views expressed in this paper are solely those of the authors and should not be taken to reflect any official policy or position of the United States Government or the Department of Defense.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Generation of quantified referring expressions: evidence from experimental data",
                "authors": [
                    {
                        "first": "Dale",
                        "middle": [],
                        "last": "Barr",
                        "suffix": ""
                    },
                    {
                        "first": "Kees",
                        "middle": [],
                        "last": "van Deemter",
                        "suffix": ""
                    },
                    {
                        "first": "Raquel",
                        "middle": [],
                        "last": "Fern\u00e1ndez",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Proceedings of the 14th European Workshop on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "157--161",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Dale Barr, Kees van Deemter, and Raquel Fern\u00e1ndez. 2013. Generation of quantified referring expres- sions: evidence from experimental data. In Proceed- ings of the 14th European Workshop on Natural Lan- guage Generation, pages 157-161.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "The construction of large number representations in adults",
                "authors": [
                    {
                        "first": "Hilary",
                        "middle": [],
                        "last": "Barth",
                        "suffix": ""
                    },
                    {
                        "first": "Nancy",
                        "middle": [],
                        "last": "Kanwisher",
                        "suffix": ""
                    },
                    {
                        "first": "Elizabeth",
                        "middle": [],
                        "last": "Spelke",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Cognition",
                "volume": "86",
                "issue": "3",
                "pages": "201--221",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hilary Barth, Nancy Kanwisher, and Elizabeth Spelke. 2003. The construction of large number representa- tions in adults. Cognition, 86(3):201-221.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A Theory of Attention for Cognitive Systems",
                "authors": [
                    {
                        "first": "Will",
                        "middle": [],
                        "last": "Bridewell",
                        "suffix": ""
                    },
                    {
                        "first": "Paul",
                        "middle": [
                            "F"
                        ],
                        "last": "Bello",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "Proceedings of the Fourth Annual Conference on Advances in Cognitive Systems",
                "volume": "",
                "issue": "",
                "pages": "1--16",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Will Bridewell and Paul F Bello. 2016. A Theory of Attention for Cognitive Systems. In Proceedings of the Fourth Annual Conference on Advances in Cog- nitive Systems, pages 1-16, Evanston, USA.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Generating quantified referring expressions with perceptual cost pruning",
                "authors": [
                    {
                        "first": "Gordon",
                        "middle": [],
                        "last": "Briggs",
                        "suffix": ""
                    },
                    {
                        "first": "Hillary",
                        "middle": [],
                        "last": "Harner",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 12th International Conference on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "11--18",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gordon Briggs and Hillary Harner. 2019. Generating quantified referring expressions with perceptual cost pruning. In Proceedings of the 12th International Conference on Natural Language Generation, pages 11-18, Tokyo, Japan.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Visual grouping and pragmatic constraints in the generation of quantified descriptions",
                "authors": [
                    {
                        "first": "Gordon",
                        "middle": [],
                        "last": "Briggs",
                        "suffix": ""
                    },
                    {
                        "first": "Hillary",
                        "middle": [],
                        "last": "Harner",
                        "suffix": ""
                    },
                    {
                        "first": "Sangeet",
                        "middle": [],
                        "last": "Khemlani",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "Proceedings of the 42nd Annual Virtual Meeting of the Cognitive Science Society",
                "volume": "",
                "issue": "",
                "pages": "1008--1014",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gordon Briggs, Hillary Harner, and Sangeet Khemlani. 2020. Visual grouping and pragmatic constraints in the generation of quantified descriptions. In Pro- ceedings of the 42nd Annual Virtual Meeting of the Cognitive Science Society, pages 1008-1014.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Grouping mechanisms in numerosity perception",
                "authors": [
                    {
                        "first": "Lorenzo",
                        "middle": [],
                        "last": "Ciccione",
                        "suffix": ""
                    },
                    {
                        "first": "Stanislas",
                        "middle": [],
                        "last": "Dehaene",
                        "suffix": ""
                    }
                ],
                "year": 2020,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {
                    "DOI": [
                        "10.31234/osf.io/p6ryv"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Lorenzo Ciccione and Stanislas Dehaene. 2020. Grouping mechanisms in numerosity perception.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Computational interpretations of the gricean maxims in the generation of referring expressions",
                "authors": [
                    {
                        "first": "Robert",
                        "middle": [],
                        "last": "Dale",
                        "suffix": ""
                    },
                    {
                        "first": "Ehud",
                        "middle": [],
                        "last": "Reiter",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Cognitive science",
                "volume": "19",
                "issue": "2",
                "pages": "233--263",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robert Dale and Ehud Reiter. 1995. Computational interpretations of the gricean maxims in the gener- ation of referring expressions. Cognitive science, 19(2):233-263.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Core systems of number",
                "authors": [
                    {
                        "first": "Lisa",
                        "middle": [],
                        "last": "Feigenson",
                        "suffix": ""
                    },
                    {
                        "first": "Stanislas",
                        "middle": [],
                        "last": "Dehaene",
                        "suffix": ""
                    },
                    {
                        "first": "Elizabeth",
                        "middle": [],
                        "last": "Spelke",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Trends in Cognitive Sciences",
                "volume": "8",
                "issue": "",
                "pages": "307--314",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lisa Feigenson, Stanislas Dehaene, and Elizabeth Spelke. 2004. Core systems of number. Trends in Cognitive Sciences, 8:307-314.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "The child's understanding of number",
                "authors": [
                    {
                        "first": "Rochel",
                        "middle": [],
                        "last": "Gelman",
                        "suffix": ""
                    },
                    {
                        "first": "Charles",
                        "middle": [
                            "R"
                        ],
                        "last": "Gallistel",
                        "suffix": ""
                    }
                ],
                "year": 1986,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rochel Gelman and Charles R Gallistel. 1986. The child's understanding of number. Harvard Univer- sity Press.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Spatial attention determines the nature of nonverbal number representation",
                "authors": [
                    {
                        "first": "Daniel",
                        "middle": [
                            "C"
                        ],
                        "last": "Hyde",
                        "suffix": ""
                    },
                    {
                        "first": "Justin",
                        "middle": [
                            "N"
                        ],
                        "last": "Wood",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Journal of Cognitive Neuroscience",
                "volume": "23",
                "issue": "",
                "pages": "2336--2351",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daniel C Hyde and Justin N Wood. 2011. Spatial at- tention determines the nature of nonverbal number representation. Journal of Cognitive Neuroscience, 23:2336-2351.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Selection enables enhancement: An integrated model of object tracking",
                "authors": [
                    {
                        "first": "Andrew",
                        "middle": [],
                        "last": "Lovett",
                        "suffix": ""
                    },
                    {
                        "first": "Will",
                        "middle": [],
                        "last": "Bridewell",
                        "suffix": ""
                    },
                    {
                        "first": "Paul",
                        "middle": [],
                        "last": "Bello",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Journal of Vision",
                "volume": "19",
                "issue": "14",
                "pages": "1--31",
                "other_ids": {
                    "DOI": [
                        "10.1167/19.14.23"
                    ]
                },
                "num": null,
                "urls": [],
                "raw_text": "Andrew Lovett, Will Bridewell, and Paul Bello. 2019. Selection enables enhancement: An inte- grated model of object tracking. Journal of Vision, 19(14):1-31.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Subitizing: An analysis of its component processes",
                "authors": [
                    {
                        "first": "George",
                        "middle": [],
                        "last": "Mandler",
                        "suffix": ""
                    },
                    {
                        "first": "Billie",
                        "middle": [
                            "J"
                        ],
                        "last": "Shebo",
                        "suffix": ""
                    }
                ],
                "year": 1982,
                "venue": "Journal of Experimental Psychology: General",
                "volume": "111",
                "issue": "",
                "pages": "1--22",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "George Mandler and Billie J Shebo. 1982. Subitizing: An analysis of its component processes. Journal of Experimental Psychology: General, 111:1-22.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Developmental change in the acuity of approximate number and area representations",
                "authors": [
                    {
                        "first": "Darko",
                        "middle": [],
                        "last": "Odic",
                        "suffix": ""
                    },
                    {
                        "first": "Melissa",
                        "middle": [
                            "E"
                        ],
                        "last": "Libertus",
                        "suffix": ""
                    },
                    {
                        "first": "Lisa",
                        "middle": [],
                        "last": "Feigenson",
                        "suffix": ""
                    },
                    {
                        "first": "Justin",
                        "middle": [],
                        "last": "Halberda",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "Developmental psychology",
                "volume": "49",
                "issue": "6",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Darko Odic, Melissa E Libertus, Lisa Feigenson, and Justin Halberda. 2013. Developmental change in the acuity of approximate number and area representa- tions. Developmental psychology, 49(6):1103.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "The emergence of \"groupitizing\" in children's numerical cognition",
                "authors": [
                    {
                        "first": "Gillian",
                        "middle": [
                            "S"
                        ],
                        "last": "Starkey",
                        "suffix": ""
                    },
                    {
                        "first": "Bruce",
                        "middle": [
                            "D"
                        ],
                        "last": "McCandliss",
                        "suffix": ""
                    }
                ],
                "year": 2014,
                "venue": "Journal of Experimental Child Psychology",
                "volume": "126",
                "issue": "",
                "pages": "120--137",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gillian S Starkey and Bruce D McCandliss. 2014. The emergence of \"groupitizing\" in children's numerical cognition. Journal of Experimental Child Psychol- ogy, 126:120-137.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Computational models of referring: a study in cognitive science",
                "authors": [
                    {
                        "first": "Kees",
                        "middle": [],
                        "last": "Van Deemter",
                        "suffix": ""
                    }
                ],
                "year": 2016,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kees Van Deemter. 2016. Computational models of re- ferring: a study in cognitive science. MIT Press.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Eye movements during perception of complex objects",
                "authors": [
                    {
                        "first": "Alfred",
                        "middle": [
                            "L"
                        ],
                        "last": "Yarbus",
                        "suffix": ""
                    }
                ],
                "year": 1967,
                "venue": "Eye movements and vision",
                "volume": "",
                "issue": "",
                "pages": "171--211",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alfred L Yarbus. 1967. Eye movements during percep- tion of complex objects. In Eye movements and vi- sion, pages 171-211. Springer.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF1": {
                "html": null,
                "text": "QRE problems and resulting model output.",
                "content": "<table/>",
                "num": null,
                "type_str": "table"
            }
        }
    }
}