{
    "paper_id": "A00-1005",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T01:12:44.052213Z"
    },
    "title": "PartslD: A Dialogue-Based System for Identifying Parts for Medical Systems",
    "authors": [
        {
            "first": "Amit",
            "middle": [],
            "last": "Bagga",
            "suffix": "",
            "affiliation": {},
            "email": "bagga@crd.ge.com"
        },
        {
            "first": "Tomek",
            "middle": [],
            "last": "Strzalkowski",
            "suffix": "",
            "affiliation": {},
            "email": "strzalkowski@crd.ge.com"
        },
        {
            "first": "G",
            "middle": [],
            "last": "Bowden Wise",
            "suffix": "",
            "affiliation": {},
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper describes a system that provides customer service by allowing users to retrieve identification numbers of parts for medical systems using spoken natural language dialogue. The paper also presents an evaluation of the system which shows that the system successfully retrieves the identification numbers of approximately 80% of the parts.",
    "pdf_parse": {
        "paper_id": "A00-1005",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper describes a system that provides customer service by allowing users to retrieve identification numbers of parts for medical systems using spoken natural language dialogue. The paper also presents an evaluation of the system which shows that the system successfully retrieves the identification numbers of approximately 80% of the parts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Currently people deal with customer service centers either over the phone or on the world wide web on a regular basis. These service centers support a wide variety of tasks including checking the balance of a bank or a credit card account, transferring money from one account to another, buying airline tickets, and filing one's income tax returns. Most of these customer service centers use interactive voice response (IVR) systems on the front-end for determining the user's need by providing a list of options that the user can choose from, and then routing the call appropriately.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "The IVRs also gather essential information like the user's bank account number, social security number, etc. For back-end support, the customer service centers use either specialized computer systems (example: a system that retrieves the account balance from a database), or, as in most cases, human operators.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "However, the IVR systems are unwieldy to use. Often a user's needs are not covered by the options provided by the system forcing the user to hit 0 to transfer to a human operator. In addition, frequent users often memorize the sequence of options that will get them the desired information. Therefore, any change in the options greatly inconveniences these users. Moreover, there are users that always hit 0 to speak to a live operator because they prefer to deal with a human instead of a machine. Finally, as customer service providers continue to rapidly add functionality to their IVR systems, the size and complexity of these systems continues to grow proportionally. In some popular systems like the IVR system that provides customer service for the Internal Revenue Service (IRS), the user is initially bombarded with 10 different options with each option leading to sub-menus offering a further 3-5 options, and so on. The total number of nodes in the tree corresponding to the IRS' IVR system is quite large (approximately 100) making it extremely complex to use.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "Some customer service providers have started to take advantage of the recent advances in speech recognition technology. Therefore, some of the IVR systems now allow users to say the option number (1, 2, 3 ..... etc.) instead of pressing the corresponding button. In addition, some providers have taken this a step further by allowing users to say a keyword or a phrase from a list of keywords and/or phrases. For example, AT&T, the long distance company, provides their users the following options: \"Please say information for information on placing a call, credit for requesting credit, or operator to speak to an operator.\" However, given the improved speech recognition technology, and the research done in natural language dialogue over the last decade, there exists tremendous potential in enhancing these customer service centers by allowing users to conduct a more natural human-like dialogue with an automated system to provide a customer-friendly system. In this paper we describe a system that uses natural language dialogue to provide customer service for a medical domain.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "The system allows field engineers to call and obtain identification numbers of parts for medical systems using natural language dialogue. We first describe some work done previously in using natural language dialogue for customer service applications. Next, we present the architecture of our system along with a description of each of the key components. Finally, we conclude by providing results from an evaluation of the system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": null
            },
            {
                "text": "As mentioned earlier, some customer service centers now allow users to say either the option number or a keyword from a list of options/descriptions. However, the only known work which automates part of a customer service center using natural language dialogue is the one by Chu-Carroll and Carpenter (1999). The system described here is used as the front-end of a bank's customer service center. It routes calls by extracting key phrases from a user utterance and then by statistically comparing these phrases to phrases extracted from utterances in a training corpus consisting of pre-recorded calls where the routing was done by a human. The call is routed to the destination of the utterance from the training corpus that is most \"similar\" to the current utterance. On occasion, the system will interact with the user to clarify the user's request by asking a question. For example, if the user wishes to reach the loan department, the system will ask if the loan is for an automobile, or a home. Other related work is (Georgila et al., 1998) .",
                "cite_spans": [
                    {
                        "start": 1023,
                        "end": 1046,
                        "text": "(Georgila et al., 1998)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "1."
            },
            {
                "text": "While we are aware of the work being done by speech recognition companies like Nuance (www.nuance.com) and Speechworks (www.speechworks.com) in the area of providing more natural language dialogue-based customer service, we are not aware of any conference or journal publications from them. Some magazine articles which mention their work are (Rosen 1999; Rossheim 1999; Greenemeier 1999 ; Meisel 1999 ). In addition, when we tried out a demo of Nuance's systems, we found that their systems had a very IVRish feel to them. For example, if one wanted to transfer $50 from one account to another, the system would first ask the account that the money was coming from, then the account that the money was going to, and finally, the amount to be transferred. Therefore, a user could not say \"I want to transfer $50 from my savings account to my checking account\" and have the system conduct that transaction.",
                "cite_spans": [
                    {
                        "start": 343,
                        "end": 355,
                        "text": "(Rosen 1999;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 356,
                        "end": 370,
                        "text": "Rossheim 1999;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 371,
                        "end": 389,
                        "text": "Greenemeier 1999 ;",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 390,
                        "end": 401,
                        "text": "Meisel 1999",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "1."
            },
            {
                "text": "In addition to the works mentioned above, there have been several classic projects in the area of natural language dialogue like TRAINS/TRIPS project at Rochester (Allen et al., 1989 (Allen et al., , 1995 (Allen et al., , 1996 , Duke's Circuit-Fixit-Shoppe and Pascal Tutoring System (Biermann et al., 1997; 1995) , etc. While the Circuit-Fixit-Shoppe system helps users fix a circuit through a dialogue with the system, the TRIPS and the TRAINS projects allow users to plan their itineraries through dialogue. Duke's Pascal tutoring system helps students in an introductory programming class debug their programs by allowing them to analyze their syntax errors, get additional information on the error, and learn the correct syntax. Although these systems have been quite successful, they use detailed models of the domain and therefore cannot be used for diverse applications such as the ones required for customer service centers. Other related work on dialogue include (Carberry, 1990; Grosz and Sidner, 1986; Reichman, 1981) .",
                "cite_spans": [
                    {
                        "start": 163,
                        "end": 182,
                        "text": "(Allen et al., 1989",
                        "ref_id": null
                    },
                    {
                        "start": 183,
                        "end": 204,
                        "text": "(Allen et al., , 1995",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 205,
                        "end": 226,
                        "text": "(Allen et al., , 1996",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 284,
                        "end": 307,
                        "text": "(Biermann et al., 1997;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 308,
                        "end": 313,
                        "text": "1995)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 973,
                        "end": 989,
                        "text": "(Carberry, 1990;",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 990,
                        "end": 1013,
                        "text": "Grosz and Sidner, 1986;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 1014,
                        "end": 1029,
                        "text": "Reichman, 1981)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "1."
            },
            {
                "text": "Initially, we were approached by the medical systems business of our company for help in reducing the number of calls handled by human operators at their call center. An analysis of the types of customer service provided by their call center showed that a large volume of calls handled by their operators were placed by field engineers requesting identification numbers of parts for various medical systems. The ID numbers were most often used for ordering the corresponding parts using an automated IVR system. Therefore, the system we have built helps automate some percentage of these calls by allowing the engineer to describe a part using natural language. The rest of this section describes our system in detail.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PartslD: A System for Identification of Parts for Medical Systems",
                "sec_num": "2."
            },
            {
                "text": "The database we used for our system was the same as the one used by the operators at the call center. This database consists of the most common parts and was built by the operators themselves. However, the data contained in the database is not clean and there are several types of errors including mis-spellings, use of nonstandard abbreviations, use of several different abbreviations for the same word, etc.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "D a t a",
                "sec_num": "2.1"
            },
            {
                "text": "The database consists of approximately 7000 different parts. For each part, the database contains its identification number, a description, and the product (machine type) that it is used in. The descriptions consist of approximately 60,000 unique words of which approximately 3,000 are words which either are non-standard abbreviations or are unique to the medical domain (example: collimator).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "D a t a",
                "sec_num": "2.1"
            },
            {
                "text": "Due to the large size of the database, we did not attempt to clean the data. However, we did build several data structures based on the database which were used by the system. The primary data structures built were two inverted hash tables corresponding to the product, and the part description fields in the database. The inverted hash tables were built as follows: 1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "D a t a",
                "sec_num": "2.1"
            },
            {
                "text": "Each product and part description field was split into words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "D a t a",
                "sec_num": "2.1"
            },
            {
                "text": "2) Stop-words (words containing no information like: a, the, an, etc.) were filtered.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "D a t a",
                "sec_num": "2.1"
            },
            {
                "text": "Each remaining word was inserted as the index of the appropriate hash table with the identification number of the part being the value corresponding to the index. Therefore, for each non-stop-word word used in describing a part, the hash table contains a list of all the parts whose descriptions contained that word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3)",
                "sec_num": null
            },
            {
                "text": "Similarly, the products hash table contains a list of all parts corresponding to each product word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "3)",
                "sec_num": null
            },
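The inverted-index construction in steps 1-3 above is simple to sketch. The following is a minimal illustration under our own assumptions (the paper gives no code): the database has already been read into (part_id, description, product) records, and the stop-word list is a placeholder.

```python
from collections import defaultdict

STOP_WORDS = {"a", "an", "the", "of", "for", "and"}  # placeholder stop-word list

def build_inverted_tables(parts):
    """Build the two inverted hash tables of section 2.1.

    parts: iterable of (part_id, description, product) records.
    Returns (description word -> set of part IDs, product word -> set of part IDs).
    """
    desc_index = defaultdict(set)
    product_index = defaultdict(set)
    for part_id, description, product in parts:
        for word in description.lower().split():
            if word not in STOP_WORDS:
                desc_index[word].add(part_id)  # step 3: word -> parts using that word
        for word in product.lower().split():
            if word not in STOP_WORDS:
                product_index[word].add(part_id)
    return desc_index, product_index
```

With these tables, the set of parts whose descriptions contain "collimator" is simply desc_index["collimator"].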
            {
                "text": "The architecture of the system is shown in Figure 1 . The system was designed in a manner such that it could be easily ported from one application to another with minimal effort other than providing the domain-specific knowledge regarding the new application. Therefore, we decided to abstract away the domain-specific information into self-contained modules while keeping the other modules completely independent. The domain-specific modules are shown in the dark shaded boxes in Figure I . The remainder of this section discusses each of the modules shown in the system architecture.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 43,
                        "end": 51,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 481,
                        "end": 489,
                        "text": "Figure I",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "S y s t e m Architecture",
                "sec_num": "2.2"
            },
            {
                "text": "Since customer service centers are meant to be used by a variety of users, we needed a userindependent speech recognition system. In addition, since the system could not restrict the manner in which a user asked for service, the speech recognition system could not be grammar-based. Therefore, we used a general purpose dictation engine for the system. The dictation system used was Lernout & Hauspie's VoiceXPress system (www.lhs.com). Although the system was general purpose, we did provide to it the set of keywords and phrases that are commonly used in the domain thereby enabling it to better recognize these domain-specific keywords and phrases. The keywords and phrases used were simply the list of descriptions and product names corresponding to each part in the database. It should be noted that the set of domain-specific keywords and phrases was provided to the speech recognition system as a text document. In other words, the training was not done by a human speaking the keywords and phrases into the speech recognition system. In addition, the speech recognition system is far from perfect.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Speech Recognition System (ASR)",
                "sec_num": "2.2.1"
            },
            {
                "text": "The recognition rates hover around 50%, and the system has additional difficulty in identifying product names which are most often words not found in a dictionary (examples: 3MlaserCam, 8000BUCKY, etc.).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Speech Recognition System (ASR)",
                "sec_num": "2.2.1"
            },
            {
                "text": "The parser is domain-driven in the sense that it uses domain-dependent information produced by the lexicon to look for information, in a user utterance, that is useful in the current domain. However, it does not attempt to understand fully each user utterance. It is robust enough to handle ungrammatical sentences, short phrases, and sentences that contain mis-recognized text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parser and the Lexicon",
                "sec_num": "2.2.2"
            },
            {
                "text": "The lexicon, in addition to providing domain-dependent keywords and phrases to the parser, also provides the semantic knowledge associated with each keyword and phrase. Therefore, for each content word in the inverted hash tables, the lexicon contains entries which help the system determine whether the word was used in a part description, or a product name. In addition, the lexicon also provides the semantic knowledge associated with the pre-specified actions which can be taken by the user like \"operator\" which allows the user to transfer to an operator, and \"stop,\" or \"quit\" which allow the user to quit the system. Some sample entries are:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parser and the Lexicon",
                "sec_num": "2.2.2"
            },
            {
                "text": "collimator => (description_word, collimator) camera => (product_word, camera) operator => (user action, operator) etc.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parser and the Lexicon",
                "sec_num": "2.2.2"
            },
            {
                "text": "The parser scans a user utterance and returns, as output, a list of semantic tuples associated with each keyword/phrase contained in the utterance. It is mainly interested in \"key words\" (words that are contained in product and part descriptions, user action words, etc.) and it ignores all the other words in the user utterance. The parser also returns a special tuple containing the entire input string which may be used later by the context-based parser for sub-string matching specially in cases when the DM has asked a specific question to the user and is expecting a particular kind of response.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parser and the Lexicon",
                "sec_num": "2.2.2"
            },
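As a rough illustration of this behavior, the sketch below maps keywords to semantic tuples through a toy lexicon and appends the special raw-input tuple. The lexicon contents and function name are ours, built from the sample entries above, not the authors' code.

```python
# Toy lexicon in the spirit of the sample entries above (illustrative only).
LEXICON = {
    "collimator": ("description_word", "collimator"),
    "camera": ("product_word", "camera"),
    "operator": ("user_action", "operator"),
}

def parse_utterance(utterance):
    """Return semantic tuples for known keywords, ignoring all other words,
    plus a special tuple carrying the raw string for later sub-string matching."""
    tuples = [LEXICON[word] for word in utterance.lower().split() if word in LEXICON]
    tuples.append(("raw_input", utterance))
    return tuples
```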
            {
                "text": "The filler takes as input the set of tuples generated by the parser and attempts to check off templates contained in the templates module using these tuples, The set of templates in the templates module contains most of remaining domain-specific knowledge required by the system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Filler and Template Modules",
                "sec_num": "2.2.3"
            },
            {
                "text": "Each template is an internal representation of a part in the database. It contains for each part, its ID, its description, and the product which contains it. In addition, there are several additional templates corresponding to pre-specified user actions like \"operator,\" and \"quit.\" A sample template follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Filler and Template Modules",
                "sec_num": "2.2.3"
            },
            {
                "text": "For each tuple input from the parser, the filler checks off the fields which correspond to the tuple. For example, if the filler gets as input (description_word, collimator) , it checks off the description fields of those templates containing collimator as a word in the field. A template is checked off iff one or more of its fields is checked off.",
                "cite_spans": [
                    {
                        "start": 143,
                        "end": 173,
                        "text": "(description_word, collimator)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
                "sec_num": null
            },
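A minimal sketch of the filler's bookkeeping follows, under the same assumptions as the earlier sketches (inverted tables mapping words to sets of part IDs); the names are illustrative, not the authors' implementation.

```python
def fill_templates(tuples, desc_index, product_index):
    """Check off templates (parts) matched by the parsed tuples and collect
    the description and product words actually uttered (section 2.2.3)."""
    checked, desc_words, product_words = set(), [], []
    for kind, value in tuples:
        if kind == "description_word":
            desc_words.append(value)
            checked |= desc_index.get(value, set())  # templates with this word
        elif kind == "product_word":
            product_words.append(value)
            checked |= product_index.get(value, set())
    # The checked-off templates and both word lists go to the dialogue manager.
    return checked, desc_words, product_words
```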
            {
                "text": "In addition, the filler also maintains a list of all description and product words passed through the tuples (i.e. these words have been uttered by the user). These two lists are subsequently passed to the dialogue manager.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
                "sec_num": null
            },
            {
                "text": "Although the filler does not appear to be very helpful for the current application domain, it is an important part of the architecture for other application domains. For example, the current PartslD system is a descendant from an earlier system which allowed users to process financial transactions where the filler was instrumental in helping the dialogue manager determine the type of transaction being carried out by the user (Bagga et al., 2000) .",
                "cite_spans": [
                    {
                        "start": 429,
                        "end": 449,
                        "text": "(Bagga et al., 2000)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "tl__I = ( 'product' = > 'SFD', 'product__ids' = > 2229005\" 'product_descriptions' => 'IR RECEIVER PC BOARD CI104 BISTABLE MEMORY')",
                "sec_num": null
            },
            {
                "text": "The DM receives as input from the filler the set of templates which are checked off. In addition, it also receives two lists containing the list of description words, and product word uttered by the user. The DM proceeds using the following algorithm: 1) It first checks the set of checked off templates input from the filler. If there is exactly one template in this set, the DM asks the user to confirm the part that the template corresponds to. Upon receipt of the confirmation from the user, it returns the identification number of the part to the user. 2) Otherwise, for each description word uttered by the user, the DM looks up the set of parts (or templates) containing the word from the descriptions inverted hash table. It then computes the intersection of these sets. If the intersection is empty, the DM computes the union of these sets and proceeds treating the union as the intersection. 3) If the intersection obtained from (2) above contains exactly one template, the DM asks the user to confirm the part corresponding to the template as in (1) above. 4) Otherwise, the DM looks at the set of product words uttered by the user. If this set is empty, the DM queries the user for the product name. Since the DM is expecting a product name here, the input provided by the user is handled by the context-based parser.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Dialogue Manager (DM)",
                "sec_num": "2.2.4"
            },
            {
                "text": "Since most product names consist of nonstandard words consisting of alpha-numeric characters (examples: AMX3, 8000BUCKY, etc.), the recognition quality is quite poor. Therefore, the context-based parser ranks the input received from the user using a sub-string matching algorithm that uses character-based unigram and bigram counts (details are provided in the next section). The sub-string matching algorithm greatly enhances the performance of the system (as shown in the sample dialogue below). 5) If the set of product words is non-empty, or if the DM has successfully queried the user for a product name, it extracts the set of parts (templates) containing each product word from the product words inverted hash table. It then computes an intersection of these sets with the intersection set of description words obtained from (2) above. The resulting intersection is the joint product and description intersection. 6) If the joint intersection has exactly one template, the DM proceeds as in (1) above. Alternatively, if the number of templates in the joint intersection is less than 4, the DM lists the parts corresponding to each of these and asks the user to confirm the correct one. 7) If there are more than 4 templates in the joint intersection, the DM ranks the templates based upon word overlap with the description words uttered by the user. If the number of resulting top-ranked templates is less than 4, the DM proceeds as in the second half of (6) above. 8) If the joint intersection is empty, or in the highly unlikely case of there being more than 4 top-ranked templates in (7), the DM asks the user to enter additional disambiguating information.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Dialogue Manager (DM)",
                "sec_num": "2.2.4"
            },
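Steps 2 and 5 of this algorithm reduce to set operations over the inverted hash tables. The sketch below is our reading of those steps (intersection with a union fallback, then the joint product/description intersection), not the authors' implementation; the function and parameter names are illustrative.

```python
def candidate_parts(desc_words, product_words, desc_index, product_index):
    """Narrow the candidate part IDs as in DM steps 2 and 5."""
    sets = [desc_index.get(word, set()) for word in desc_words]
    candidates = set.intersection(*sets) if sets else set()
    if not candidates and sets:
        # Step 2 fallback: a mis-recognized word empties the intersection,
        # so treat the union as the intersection instead.
        candidates = set.union(*sets)
    for word in product_words:
        # Step 5: joint product and description intersection.
        candidates &= product_index.get(word, set())
    return candidates
```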
            {
                "text": "The goal of the DM is to hone in on the part (template) desired by the user, and it has to determine this from the set of templates input to it by the filler. It has to be robust enough to deal with poor recognition quality, inadequate information input by the user, and ambiguous data. Therefore, the DM is designed to handle these issues. For example, description words that are mis-recognized as other description words usually cause the intersection of the sets of parts corresponding to these words to be empty. The DM, in this case, takes a union of the sets of parts corresponding to the description words thereby ensuring that the template corresponding to the desired part is in the union.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Dialogue Manager (DM)",
                "sec_num": "2.2.4"
            },
            {
                "text": "The DM navigates the space of possibilities by first analyzing the intersection of the sets of parts corresponding to the description words uttered by the user. If no unique part emerges, the DM then checks to see if the user has provided any information about the product that the part is going to be used in. If no product was mentioned by the user, the DM queries the user for the product name. Once this is obtained, the DM then checks to see if a unique part corresponds to the product name and the part description provided by the user. If no unique part emerges, then the DM backs off and asks the user to re-enter the part description. Alternatively, if more than one part corresponds to the specified product and part description, then the DM ranks the parts based upon the number of words uttered by the user. Obviously, since the DM in this case uses a heuristic, it asks the user to confirm the part that ranks the highest. If more than one (although less than 4) parts have the same rank, then the DM explicitly lists these parts and asks the user to specify the desired part. It should be noted that the DM has to ensure that the information it receives is actually what the user meant. This is especially true when the DM uses heuristics, and sub-string matches (as in the case of product names). Therefore, the DM occasionally asks the user to confirm input it has received.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Dialogue Manager (DM)",
                "sec_num": "2.2.4"
            },
            {
                "text": "When the dialogue manager is expecting a certain type of input (examples : product names, yes/no responses) from the user, the user response is processed by the context-based parser. Since the type of input is known, the context-based parser uses a sub-string matching algorithm that uses character-based unigram and bigram counts to match the user input with the expectation of the dialogue manager. Therefore, the sub-string matching module takes as input a user utterance string along with a list of expected responses, and it ranks the list of expected responses based upon the user response. Listed below are the details of the algorithm : 1) The algorithm first concatenates the words of the user utterance into one long string. This is needed because the speech recognition system often breaks up the utterance into words even though a single word is being said. For example, the product name AMXll0 is often broken up into the string 'Amex 110'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Sub-String Matching Algorithm",
                "sec_num": "2.2.5"
            },
            {
                "text": "2) Next, the algorithm goes through the string formed in (1) and compares this character by character with the list of expected responses. It assigns one point for every common character. Therefore, the expected response 'AMX3' gets three points for the utterance 'Amex110'.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Sub-String Matching Algorithm",
                "sec_num": "2.2.5"
            },
            {
                "text": "3) The algorithm then compares the user utterance with the list of expected responses using 2 characters (bigrams) at a time. It assigns 2 points for each bigram match. For the example shown in (2), there are two bigram matches: the first is that the utterance starts with an 'A' (the previous character is this case is the null character), and the second is the bigram 'AM'. 4) The algorithm now compares the length of the user utterance string and the expected response. If the length of the two strings is the same, then it assigns 2 points to the expected response. 5) Finally, the algorithm calculates the number of unique characters in the expected response, and the user utterance string. If these characters are the same, then it assigns 4 points to the expected response. The expected response which has the highest number of points is the most likely one. If two or more expected responses have the same number of points, then the system asks the user to confh'm the correct one.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Sub-String Matching Algorithm",
                "sec_num": "2.2.5"
            },
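These scoring heuristics translate directly into code. The sketch below is our reading of the five steps (the point values come from the paper; the exact character handling is an assumption), and it reproduces the worked 'AMX3' vs. 'Amex 110' example: 3 unigram points plus 2 bigram matches.

```python
def substring_score(utterance, expected):
    """Score one expected response against a user utterance (section 2.2.5)."""
    s = utterance.replace(" ", "").upper()      # step 1: concatenate the words
    e = expected.upper()
    score = len(set(s) & set(e))                # step 2: 1 point per common character

    def bigrams(t):
        t = "^" + t                             # '^' stands in for the null start character
        return {t[i:i + 2] for i in range(len(t) - 1)}

    score += 2 * len(bigrams(s) & bigrams(e))   # step 3: 2 points per common bigram
    if len(s) == len(e):
        score += 2                              # step 4: same string length
    if set(s) == set(e):
        score += 4                              # step 5: identical character sets
    return score

# The highest-scoring expected response is taken as the most likely one, e.g.:
#   best = max(expected_responses, key=lambda e: substring_score(user_input, e))
```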
            {
                "text": "While we have not evaluated this substring matching algorithm independently, a brief evaluation in the context of the system resulted in about 90% accuracy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Sub-String Matching Algorithm",
                "sec_num": "2.2.5"
            },
            {
                "text": "The presentation module works in one of two possible modes: over the phone, and over the web.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Presentation Module",
                "sec_num": "2.2.6"
            },
            {
                "text": "This module takes as input a string generated by the question-generation module and presents this string to the user in the appropriate mode of communication. If the speech option for the system is turned on, the speech-based output is generated using Lernout and Hauspie's RealSpeak text-to-speech system. Although the system currently cannot use both modes of communication simultaneously, we plan to incorporate this feature sometime in the future.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Presentation Module",
                "sec_num": "2.2.6"
            },
            {
                "text": "As with any dialogue system, it is extremely important for the system to be robust. Our system has the following two features which make it extremely robust: 1)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "7 Robustness of the System",
                "sec_num": "2.2."
            },
            {
                "text": "The user can, at any given moment, say operator to transfer to an operator, quit~stop to exit, and back~restart to start afresh.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "7 Robustness of the System",
                "sec_num": "2.2."
            },
            {
                "text": "When expecting a response from the user, if the system does not receive an expected input, it repeats the question at most twice before transferring control to an operator.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2)",
                "sec_num": null
            },
            {
                "text": "This section gives examples of two sample dialogues that occur during the testing of the system. The system's response is indicated by \"S>\", the user's response by \"U>\", and the recognition of the user's response by \"V>\". ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sample Dialogues",
                "sec_num": "3."
            },
            {
                "text": "The goal of our evaluation was to ensure that the system helped a user successfully identify parts irrespective of the performance of the speech recognition engine for the user. In other words, we wanted to see if the system was robust enough to conduct transactions with a diverse mix of users. We tested the system with 4 different users two of whom had foreign accents. For each user, we randomly selected 20 parts from the database. The results are summarized in Table 1 . These results show that the system was quite successful in handling requests from users with a variety of accents achieving varying recognition rates. Out of the 80 parts tested, only twice did the user feel that he/she had to transfer to an operator. The system successfully retrieved the identification numbers of 79% of the parts while transferring 19% of the cases to a human operator because of extremely bad ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 467,
                        "end": 474,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Evaluation and Results",
                "sec_num": "4."
            },
            {
                "text": "In this paper we have described a robust system that provides customer service for a medical parts application. The preliminary results are extremely encouraging with the system being able to successfully process approximately 80% of the requests from users with diverse accents.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We wish to thank the GE Medical Systems team of Todd Reinke, Jim Tierney, and Lisa Naughton for providing support and funding for this project. In addition, we also wish to thank Dong Hsu of Lernout and Hauspie for his help on the ASR and the text-to-speech systems. Finally, we wish to thank the Information Technology Laboratory of GE CRD for providing additional funding for this project.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "The TRAINS Project: A case study in building a conversational planning agent",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "F"
                        ],
                        "last": "Allen",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Journal of Experimental and Theoretical AI",
                "volume": "",
                "issue": "7",
                "pages": "7--48",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Allen, J. F. et al. (1995) The TRAINS Project: A case study in building a conversational planning agent. Journal of Experimental and Theoretical AI, (7) 7-48.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "A Robust System for Natural Spoken Dialogue",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "F"
                        ],
                        "last": "Allen",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [
                            "W"
                        ],
                        "last": "Miller",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [
                            "K"
                        ],
                        "last": "Ringer",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Sikorski",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "34th Annual Meeting of the ACL",
                "volume": "",
                "issue": "",
                "pages": "62--70",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Allen, J. F., Miller, B. W.; Ringer, E. K.; and Sikorski, T. (1996) A Robust System for Natural Spoken Dialogue. 34th Annual Meeting of the ACL, Santa Cruz, 62-70.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "FidelityXPress: A Multi-Modal System for Financial Transactions",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Bagga",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [
                            "C"
                        ],
                        "last": "Stein",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Strzalkowski",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 6 a~ Conference on Content-Based Multimedia Information Access (RIAO'00)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bagga, A., Stein G. C., and Strzalkowski, T. (2000) FidelityXPress: A Multi-Modal System for Financial Transactions. Proceedings of the 6 a~ Conference on Content-Based Multimedia Information Access (RIAO'00).",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Natural language with discrete speech as a mode for human to machine communication",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "W"
                        ],
                        "last": "Biermann",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Rodman",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Rubin",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "R"
                        ],
                        "last": "Heidlage",
                        "suffix": ""
                    }
                ],
                "year": 1985,
                "venue": "Communication of the ACM",
                "volume": "18",
                "issue": "6",
                "pages": "628--636",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Biermann, A.W.; Rodman, R.; Rubin, D.; and Heidlage, J.R. (1985) Natural language with discrete speech as a mode for human to machine communication. Communication of the ACM 18(6): 628-636.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Goal-orientedMultimedia Dialogue with Variable Initiative",
                "authors": [
                    {
                        "first": "Alan",
                        "middle": [
                            "W"
                        ],
                        "last": "Biermann",
                        "suffix": ""
                    },
                    {
                        "first": "Curry",
                        "middle": [
                            "I"
                        ],
                        "last": "Guinn",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Fulkerson",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [
                            "A"
                        ],
                        "last": "Keim",
                        "suffix": ""
                    },
                    {
                        "first": "Z",
                        "middle": [],
                        "last": "Liang",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [
                            "M"
                        ],
                        "last": "Melamed",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Rajagopalan",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "Lecture Notes in Artificial Intelligence",
                "volume": "",
                "issue": "",
                "pages": "1--16",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Biermann, Alan W.; Guinn, Curry I.; Fulkerson, M.: Keim, G.A.; Liang, Z.; Melamed, D.M.; and Rajagopalan, K. (1997) Goal-orientedMultimedia Dialogue with Variable Initiative. Lecture Notes in Artificial Intelligence 1325; Springer-Verlag, New York; pp. 1-16.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Plan Recognition in Natural Language Dialogue",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Carberry",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Carberry, S. (1990) Plan Recognition in Natural Language Dialogue. Cambridge, Mass.: The MIT Press.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Vector-Based Natural Language Call Routing",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Chu-Carroll",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Carpenter",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Journal of Computational Linguistics",
                "volume": "25",
                "issue": "30",
                "pages": "361--388",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Chu-Carroll, J, and R. Carpenter. (1999) Vector- Based Natural Language Call Routing. Journal of Computational Linguistics, 25(30), pp. 361-388.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "An Integrated Dialogue System for the Automation of Call Centre Services",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Georgila",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Tsopanoglou",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Fakotakis",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Kokkinakis",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "ICLSP'98, 5th International Conference on Spoken Language Processing",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Georgila, K., A.Tsopanoglou, N.Fakotakis and G.Kokkinakis. (1998) An Integrated Dialogue System for the Automation of Call Centre Services. ICLSP'98, 5th International Conference on Spoken Language Processing, Sydney, Australia.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Attentions, intentions, and the structure of discourse",
                "authors": [
                    {
                        "first": "B",
                        "middle": [
                            "J"
                        ],
                        "last": "Grosz",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "L"
                        ],
                        "last": "Sidner",
                        "suffix": ""
                    }
                ],
                "year": 1986,
                "venue": "Computational Linguistics",
                "volume": "12",
                "issue": "3",
                "pages": "175--204",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Grosz, B.J. and Sidner, C.L. (1986) Attentions, intentions, and the structure of discourse. Computational Linguistics 12(3): 175-204.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Voice-Recognition Technology Builds a Following. Information Week",
                "authors": [
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Greenemeier",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Greenemeier, L. (1999) Voice-Recognition Technology Builds a Following. Information Week, December 13.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Can Speech Recognition Give Telephones a New Face?",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Meisel",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Business Communications Review",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Meisel, W. (1999) Can Speech Recognition Give Telephones a New Face? Business Communications Review, November 1.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Plain-speaking: A theory and grammar of spontaneous discourse",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Reichman",
                        "suffix": ""
                    }
                ],
                "year": 1981,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Reichman, R.. (1981) Plain-speaking: A theory and grammar of spontaneous discourse. PhD thesis, Department of Computer Science, Harvard University, Cambridge, Massachusetts.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Speech Has Industry Talking. Business Travel News",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Rosen",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rosen, C. (1999) Speech Has Industry Talking. Business Travel News, November.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Giving Voice to Customer Service. Datamation",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Rossheim",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rossheim, J. (1999) Giving Voice to Customer Service. Datamation, November 1.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "text": "Figure 1. PartslD System Architecture",
                "num": null,
                "uris": null
            }
        }
    }
}