{
    "paper_id": "O07-3001",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T08:08:24.918393Z"
    },
    "title": "Differences in the Speaking Styles of a Japanese Male According to Interlocutor; Showing the Effects of Affect in Conversational Speech",
    "authors": [
        {
            "first": "Nick",
            "middle": [],
            "last": "Campbell",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Keihanna Science City",
                "location": {
                    "postCode": "619-0288",
                    "settlement": "Kyoto",
                    "country": "Japan"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "There has been considerable interest recently in the processing of affect in spoken interactions. This paper presents an analysis of some conversational speech corpus data showing that the four prosodic characteristics, duration, pitch, power, and voicing all vary significantly according to both interlocutor differences and differences in familiarity over a fixed period of time with the same interlocutor.",
    "pdf_parse": {
        "paper_id": "O07-3001",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "There has been considerable interest recently in the processing of affect in spoken interactions. This paper presents an analysis of some conversational speech corpus data showing that the four prosodic characteristics, duration, pitch, power, and voicing all vary significantly according to both interlocutor differences and differences in familiarity over a fixed period of time with the same interlocutor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Human spoken interactions convey a variety of different types of information. In addition to the linguistic content of speech, there are also paralinguistic and extralinguistic elements that convey discourse-level and interpersonal levels of information related to the speaker, to the speaker's relationship(s) with the listener, and to the intended and actual progress of the discourse [Lindblom 1990; Stenstr\u00f6m 1994; Hirschberg 1992 Hirschberg , 1995 .",
                "cite_spans": [
                    {
                        "start": 387,
                        "end": 402,
                        "text": "[Lindblom 1990;",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 403,
                        "end": 418,
                        "text": "Stenstr\u00f6m 1994;",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 419,
                        "end": 434,
                        "text": "Hirschberg 1992",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 435,
                        "end": 452,
                        "text": "Hirschberg , 1995",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "Affect is conveyed in speech communication in a multitude of ways [Cahn 1989 ], including facial expression, gesture, body posture, speaking-style, tone-of-voice, lexical choice, syntactic construction, etc. It is perhaps impossible for a human to speak without revealing information about his or her affective states [Campbell 2005 ]. This paper examines how such affective information might be carried in the voice, particularly in the prosody of the speech, and shows from an examination of some corpus data that evidence can be found for changes in affective state according to the nature of the interocutor and the history of their discoursal relationship.",
                "cite_spans": [
                    {
                        "start": 66,
                        "end": 76,
                        "text": "[Cahn 1989",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 318,
                        "end": 332,
                        "text": "[Campbell 2005",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1."
            },
            {
                "text": "Speech research nowadays is predominantly corpus-based. One learns about the characteristics of speech and the expressivity of speech utterances from the analysis of a very large number of samples collected under a variety of speaking conditions [Campbell et al. 2006; Cowie et al. 2005] . For a period of five years, in order to aid the development of a technology capable of Expressive Speech Processing (ESP), the Japan Science and Technology Agency funded the collection of a large corpus of expressive speech that was coordinated by ATR in Kyoto, Japan.",
                "cite_spans": [
                    {
                        "start": 246,
                        "end": 268,
                        "text": "[Campbell et al. 2006;",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 269,
                        "end": 287,
                        "text": "Cowie et al. 2005]",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The JST/CREST ESP Corpus",
                "sec_num": "2."
            },
            {
                "text": "As part of this corpus, over a period of three months during 2002, a group of ten volunteers were employed to talk with each other over the telephone for half-an-hour each time and to record their conversations to DAT using high-quality head-mounted condenser microphones. These conversations and their manually-produced transcriptions now form subset ESP_C of the JST/CREST ESP Corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The JST/CREST ESP Corpus",
                "sec_num": "2."
            },
            {
                "text": "Of the ten volunteers, two were native speakers of Chinese, both fluent in Japanese, one male and one female, and two were native speakers of English, both fluent in Japanese, one male and one female. The remaining six were Japanese native speakers, three men and three women, living in the Kansai area of central Japan. They did not know each other initially but became familiar over the period of their telephone conversations. To our direct knowledge they never met face-to-face during this period. This paper focuses on the speech characteristics of one male speaker from this corpus, JMA, who spoke with six partners over the three month period. In all, their conversations include 49,377 utterances from speaker JMA, where an utterance is approximately defined as the shortest meaningful unit of speech produced under a single intonation contour. The actual boundaries were determined on a case-by-case basis by the transcribers according to a set of rules published elsewhere ].",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The JST/CREST ESP Corpus",
                "sec_num": "2."
            },
            {
                "text": "The paper examines the acoustic characteristics of these utterances according to differences in interlocutor and stage of the interaction, showing that speaking style and voice phonation characteristics vary according to the interlocutor, in accordance with changes in familiarity and other speaker-listener relationships.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The JST/CREST ESP Corpus",
                "sec_num": "2."
            },
            {
                "text": "In previous work [Campbell 2005 affect-bearing utterances were distinguished from those that serve primarily to portray propositional or 'linguistic' content. The former, often called 'grunts' or 'affect bursts' are not usually found registered as words in a language dictionary, but are found very frequently in colloquial speech. For this study, a subset of 100 of those that occurred more than 50 times each in the conversations of one speaker (JMA) was selected, yielding 11,750 short conversational utterances for subsequent acoustic analysis.",
                "cite_spans": [
                    {
                        "start": 17,
                        "end": 31,
                        "text": "[Campbell 2005",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Materials for the Study",
                "sec_num": "3."
            },
            {
                "text": "These were taken from five conversations each with each of the Chinese and English native-speakers, and from ten conversations with the Japanese native-speaker partners. Table   1 shows the number of utterances produced with each interlocutor. Table 2 lists the romanised orthographic transcriptions and counts of some of the more common examples.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 170,
                        "end": 179,
                        "text": "Table   1",
                        "ref_id": null
                    },
                    {
                        "start": 244,
                        "end": 251,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Materials for the Study",
                "sec_num": "3."
            },
            {
                "text": "The initial letters J,C,E in the interlocutor identifiers stand for Japanese, Chinese, and English respectively, the middle letters F and M stand for female and male respectively, and the third letter is an identifier. The speech files corresponding to these utterances were analysed for their acoustic characteristics and a table of statistics for each utterance was produced. Specifically, the duration, pitch, power, and spectral characteristics of each utterance were recorded.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 1. Utterance counts for the series of conversations with each interlocutor.",
                "sec_num": null
            },
            {
                "text": "Duration was expressed both as absolute (log) duration of the measured utterance and as 'speaking rate' by dividing the absolute duration of the utterance by the number of phonemes in its transcription. This is a crude measure which does not take into consideration the inherent differences in different phone durations, but which serves to provide a simple approximation of speaking rate which will suffice for the present analysis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 1. Utterance counts for the series of conversations with each interlocutor.",
                "sec_num": null
            },
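The crude speaking-rate measure described above reduces to a single division per utterance. A minimal sketch in Python follows; the function name and the assumption of one phone per transcription character are ours for illustration, not taken from the paper's tooling:

```python
# Crude speaking rate as described above: utterance duration divided by the
# number of phones in the transcription, with no correction for the inherent
# durations of individual phones.

def ms_per_phone(duration_ms: float, transcription: str) -> float:
    """Hypothetical helper: assumes one phone per non-space character."""
    n_phones = len(transcription.replace(" ", ""))
    if n_phones == 0:
        raise ValueError("empty transcription")
    return duration_ms / n_phones

# Illustrative only: a 430 ms utterance transcribed as "hai" (3 characters)
# gives roughly 143 ms per phone, in the range reported later in Section 4.
print(ms_per_phone(430.0, "hai"))
```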
            {
                "text": "Pitch (or more precisely, a measure of the fundamental frequency of the voice) was extracted using the ESPS 'get_f0' method that is incorporated in the 'Snack' signal processing library. The maximum and minimum pitch values for each file were recorded and stored along with an estimate of the range and average values for each utterance. The pitch contour was characterised by noting the average values measured over each third of the utterance, and stored these along with the percentage position of the pitch peak and the lowest pitch value.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 1. Utterance counts for the series of conversations with each interlocutor.",
                "sec_num": null
            },
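The per-utterance pitch statistics above are straightforward to approximate. The paper used ESPS 'get_f0' via Snack; the sketch below substitutes librosa's pyin tracker, which is an assumption on our part rather than the paper's toolchain, as is the 60-400 Hz search range for an adult male voice:

```python
# Per-utterance f0 statistics (max, min, mean, range) over voiced frames.
# librosa's pyin stands in here for ESPS get_f0.
import numpy as np
import librosa

def f0_stats(wav_path: str) -> dict:
    y, sr = librosa.load(wav_path, sr=16000)
    f0, voiced_flag, voiced_prob = librosa.pyin(y, fmin=60.0, fmax=400.0, sr=sr)
    f0 = f0[~np.isnan(f0)]  # pyin marks unvoiced frames as NaN
    if f0.size == 0:
        return {}           # utterance contained no voiced frames
    return {
        "max": float(f0.max()),
        "min": float(f0.min()),
        "mean": float(f0.mean()),
        "range": float(f0.max() - f0.min()),
    }
```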
            {
                "text": "Power values (i.e, measures of rms waveform amplitude) were calculated similarly, using the Snack command \"power\", and stored as maximum, minimum, average, and range for each utterance.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 1. Utterance counts for the series of conversations with each interlocutor.",
                "sec_num": null
            },
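A framewise RMS computation reproduces the shape of these power statistics. The paper's tool was the Snack "power" command; this numpy version is a stand-in sketch, and the frame and hop sizes are illustrative assumptions:

```python
# Per-utterance power statistics (max, min, average, range) from framewise
# RMS amplitude, expressed in dB.
import numpy as np

def power_stats(samples: np.ndarray, frame: int = 256, hop: int = 128) -> dict:
    frames = [samples[i:i + frame]
              for i in range(0, len(samples) - frame + 1, hop)]
    if not frames:
        raise ValueError("utterance shorter than one analysis frame")
    rms = np.array([np.sqrt(np.mean(f.astype(float) ** 2)) for f in frames])
    db = 20.0 * np.log10(np.maximum(rms, 1e-10))  # floor avoids log(0)
    return {"max": float(db.max()), "min": float(db.min()),
            "average": float(db.mean()), "range": float(db.max() - db.min())}
```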
            {
                "text": "Spectral characteristics were calculated using the Snack command \"dBPower\" with options \"-fftlen 128 -windowlength 128 -analysistype LPC -lpcorder 20 \". This produced an LPC-based 64-point vector representing the long-term average spectrum for the entire utterance (average length 0.54 seconds) from which values from points 2, 3, 4, 5, 7, and 9 were selected to represent the average power up to 1.5 kHz, points 12,15,19,23,28 to represent the average power between 1.5kHz and 4kHz, and points 34, 41, 49, 56, and 63 were selected to represent the average power between 4kHz and 8kHz. The average spectral energy measured in each of these three frequency bands was stored as a 3-valued vector for subsequent 'spectral' analysis. Since our main objective here is to examine spectral tilt, as evidence of differential phonation styles, the relative differences between the three bands (mid, high, and low on a mel-scale) were determined to suffice as a measure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Table 1. Utterance counts for the series of conversations with each interlocutor.",
                "sec_num": null
            },
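Since only the three band averages survive into the analysis, the long-term average spectrum can be approximated without the LPC step. The sketch below uses a plain FFT periodogram in place of Snack's LPC-based "dBPower", so it approximates the idea rather than reproducing the paper's exact measure:

```python
# Three-band long-term average spectrum as a spectral-tilt proxy: average
# energy (in dB) below 1.5 kHz, between 1.5 and 4 kHz, and between 4 and
# 8 kHz. Band differences (low minus mid, mid minus high) express the slope.
import numpy as np

def band_energies_db(samples: np.ndarray, sr: int = 16000) -> tuple:
    spectrum = np.abs(np.fft.rfft(samples.astype(float))) ** 2
    freqs = np.fft.rfftfreq(len(samples), d=1.0 / sr)

    def band_db(lo_hz: float, hi_hz: float) -> float:
        band = spectrum[(freqs >= lo_hz) & (freqs < hi_hz)]
        return 10.0 * np.log10(band.mean() + 1e-12)

    low = band_db(0.0, 1500.0)
    mid = band_db(1500.0, 4000.0)
    high = band_db(4000.0, 8000.0)
    return low, low - mid, mid - high  # the three quantities plotted in Figure 5
```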
            {
                "text": "After confirming independence of the variables under examination, a weak but insignificnt correlation of 032 r =.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Analysis of the Data",
                "sec_num": "4."
            },
            {
                "text": "was found between variations in pitch range and power range, and one of 037 r =.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Analysis of the Data",
                "sec_num": "4."
            },
            {
                "text": "between the averaged values of pitch and power across the 11,750 short utterances selected from the 49,377 utterances in the conversational corpus. There was a similar weak correlation between the measures of duration and power ( 034 r =. ) but none",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Analysis of the Data",
                "sec_num": "4."
            },
            {
                "text": "between duration and pitch ( 019 r =. ). The correlation between spectral energy (power in the lowest band) and raw signal amplitude (signal power) was 008 . . One can thus be satisfied that the measures are sufficiently independent to carry meaningful information in their differences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
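The independence check described above amounts to computing pairwise Pearson correlations over the 11,750 utterance-level measurements. A minimal sketch; the variable names are illustrative, not the paper's:

```python
# Pairwise Pearson correlations between per-utterance measures, used to
# check that the measures are sufficiently independent (Section 4).
import numpy as np

def pairwise_r(measures: dict) -> dict:
    """measures maps a name to a 1-D array; returns r for each pair."""
    names = list(measures)
    return {
        (a, b): round(float(np.corrcoef(measures[a], measures[b])[0, 1]), 2)
        for i, a in enumerate(names)
        for b in names[i + 1:]
    }

# e.g. pairwise_r({"pitch_range": pr, "power_range": wr, "duration": d})
# should reproduce values like r = 0.32 for pitch range vs. power range.
```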
            {
                "text": "There was a clear corrrelation of 081 r =.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
            {
                "text": "observed between energy in the first spectral band (frequencies up to 1500 Hz) and in the second (frequencies between 1.5kHz and 4kHZ), but no such correlation between frequencies in the second and third bands (i.e., between frequencies below and above 4kHz) which showed a correlation of 02 r =.. It is the difference between these latter two bands that is of interest here, since the lack of energy in the upper frequency bands is an indicator of a less tense, more breathy, speaking style which has been shown in previous studies ([ Gauffin and Sundberg 1989; Sluijter and van Heuven 1994; Campbell and Mokhtari 2003] ) to correlate with intimacy and a more careful manner of speaking. Figure 1 shows the values of f0 measured from the speech data of the male speaker JMA plotted separately for each interlocutor. The left plot shows average f0, the middle plot maximuim f0, and the right-hand plot minimum values of f0 measured in the conversations with each interlocutor respectively. The box-plots show median and interquartile values, with whiskers extending to 1.5 times the interquartile range. The boxes are drawn with widths proportional to the square-roots of the number of observations in the groups. A notch is drawn in each side of the boxes. If the notches of two plots do not overlap this is 'strong evidence' that the two medians differ at the 5% level of confidence. Figure 1 shows that there is more variation in the voice fundamental frequency of speaker JMA when talking to the non-native partners, while the average values of f0 for the Japanese partners JFA and JMB are higher and less dispersed. The maximum f0 is highest when speaking with the English female, and lowest when talking with the Chinese male partner. When speaking with the Japanese native speakers, the maximum f0 shows the same median values as when talking with the English female partner, but there is overall more variety in f0 when speaking with the non-native parters. Figure 2 plots the average, maximum and minimum power values for conversations with each of the six interlocutors. It shows that more energy is used when speaking with the Japanese partners, and more variation when speaking with the non-native interlocutors.",
                "cite_spans": [
                    {
                        "start": 536,
                        "end": 562,
                        "text": "Gauffin and Sundberg 1989;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 563,
                        "end": 592,
                        "text": "Sluijter and van Heuven 1994;",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 593,
                        "end": 620,
                        "text": "Campbell and Mokhtari 2003]",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 689,
                        "end": 697,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 1386,
                        "end": 1394,
                        "text": "Figure 1",
                        "ref_id": null
                    },
                    {
                        "start": 1966,
                        "end": 1974,
                        "text": "Figure 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
            {
                "text": "Interestingly, the minimum power appears to be higher among conversations with the Chinese male partner than among conversations with the other interlocutors, but significantly lower for conversations with the Japanese female partner. Figure 4 shows details of 'speaking rate' changes across the series of conversations with the two Japanese partners. This measure was calculated for each utterance by dividing the observed duration of its speech waveform (measured in milliseconds) by the number of characters in its transcription (see Table 2 for examples) and is therefore only an approximation of the true speaking rate, but it serves as a basis for comparison and provides a simple form of normalisation for the inherent differences in utterance type. Speaker JMA took part in nine conversations with female JFA, and eleven with male JMB. We note an average of 174.2 milliseconds per phone for interlocutor JFA, and an average of 169.65 for interlocutor JMB. The speaking rate with the male partner appears to slow down throughout the series of conversations, while after reaching a peak in conversation J04 with JFA it appears to revert to a higher rate with the progression of time. Median values for JFA are 143, 162, 180, 183, 170, 166, 157, 171 , and 158, while those for JMB are 144. 5, 150.5, 183.0, 171.0, 149.5, 165.5, 170.5, 181.0, 167.5, 190 .0, and 182.0.",
                "cite_spans": [
                    {
                        "start": 1216,
                        "end": 1220,
                        "text": "143,",
                        "ref_id": null
                    },
                    {
                        "start": 1221,
                        "end": 1225,
                        "text": "162,",
                        "ref_id": null
                    },
                    {
                        "start": 1226,
                        "end": 1230,
                        "text": "180,",
                        "ref_id": null
                    },
                    {
                        "start": 1231,
                        "end": 1235,
                        "text": "183,",
                        "ref_id": null
                    },
                    {
                        "start": 1236,
                        "end": 1240,
                        "text": "170,",
                        "ref_id": null
                    },
                    {
                        "start": 1241,
                        "end": 1245,
                        "text": "166,",
                        "ref_id": null
                    },
                    {
                        "start": 1246,
                        "end": 1250,
                        "text": "157,",
                        "ref_id": null
                    },
                    {
                        "start": 1251,
                        "end": 1254,
                        "text": "171",
                        "ref_id": null
                    },
                    {
                        "start": 1295,
                        "end": 1357,
                        "text": "5, 150.5, 183.0, 171.0, 149.5, 165.5, 170.5, 181.0, 167.5, 190",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 235,
                        "end": 243,
                        "text": "Figure 4",
                        "ref_id": null
                    },
                    {
                        "start": 537,
                        "end": 544,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Fundamental Frequency and Power",
                "sec_num": "4.1"
            },
            {
                "text": "These values may seem unexpectedly long to an observant reader familiar with segmental durations, but it should be noted that they are sounds in afective grunts, not phones in lexical words. For example, the quantile durations (in seconds) for the word \"hai\" (=\"yes\"/\"I'm listening\"/\"I agree\") are as follows: minimum: 0.152, 25th percentile: 0.382, median 0.43, 75th percentile: 0.49, and max: 1.59 seconds (n=7295). Those for the word \"ee\" (n=2679) are 0.268, 0.344, 0.42, 0.479, and 0.539. The durations observed for laughs in this context (n=3041) ranged from 119 milliseconds to four seconds, with a median duration of 0.9 seconds. Note that many of these utterances bear lengthening diacritics (e.g., 'a', 'a-', 'a-', 'a-', etc) and the transcribers who were all native speakers of Japanese were instructed to use one minus-sign to mark each mora-worth of lengthening perceived on the segment. It is customary to use such lengthening marks in standard Japanese Kana orthography, and mora durations are typically strictly observed in Japanese, where a moraic difference in timing",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Figure 4. Speaking rate changes over weekly sessions",
                "sec_num": null
            },
            {
                "text": "sructure can (unlike English) cue a different lexical item 1 .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
            {
                "text": "Spectral energy provides a simple cue to phonation style; the less energy in the higher part of the spectrum, the breathier the voice, and vice-versa. In pressed voice, the glottis closes faster as a proportion of the fundamental period, and the rapid closing is a sign of increased vocal effort and/or laryngeal muscular tension [Klasmeyer and Sendlmeier 1997; Fant 1993; Johnstone and Scherer 2000] . In conversational speech considerable use is made of voice phonation settings, especially for the display of affect [Campbell 2005; Campbell and Mokhtari 2003 ]. show differences between the low-frequency energy and the higher bands. Differences are also measured in decibels, and here 'low-band minus mid-band energy' is plotted in the centre plot, and 'mid-band minus high-band energy' is plotted in the right-hand plot. By plotting the differences rather than the absolute values, it is easier to visualise the spectral slope differences across these utterances.",
                "cite_spans": [
                    {
                        "start": 330,
                        "end": 361,
                        "text": "[Klasmeyer and Sendlmeier 1997;",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 362,
                        "end": 372,
                        "text": "Fant 1993;",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 373,
                        "end": 400,
                        "text": "Johnstone and Scherer 2000]",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 519,
                        "end": 534,
                        "text": "[Campbell 2005;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 535,
                        "end": 561,
                        "text": "Campbell and Mokhtari 2003",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Spectral Slope",
                "sec_num": "4.3"
            },
            {
                "text": "The figure, averaged over all conversations, shows higher low-band energy for the Japanese female and the two Chinese interlocutors, with increased spectral slope for the Japanese female in the mid-band, and steeper spectral slope for the Japanese female and the two Chinese interlocutors at the top-end of the spectrum. The spectrum is therefore flatter overall for the English native-speaking partners and for the Japanese male partner. A flatter spectrum has been shown to reflect more tension in the voice.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 4,
                        "end": 11,
                        "text": "figure,",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Spectral Slope",
                "sec_num": "4.3"
            },
            {
                "text": "Quantiles for the three spectral bands (measured over all data for speaker JMA) are given",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Spectral Slope",
                "sec_num": "4.3"
            },
            {
                "text": "in Table 3 , which shows median values to be -42, 14, and 9 decibels respectively. The difference of 14 decibels indicates that the average value for energy measured in the frequency range between 1.5kHz and 4kHz is -56 decibels, while the energy between 4kHz and 8kHz is typically at the -65 decibel level. second and third conversations, considerable variability in the high-frequency dropoff. This would be consistent with a higher degree of tension and varying politeness in the speech of the initial and penultimate conversations. For conversations in the interim period, from J04 to J10, a gradual decrease of steepness is found in the high-end spectral tilt that would be consistent with an increase in familiarity as reflected by more frankness and less polite softening of the voice. Then for the final conversations, as the recordings (and the three-month relationship)",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 3,
                        "end": 10,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Spectral Slope",
                "sec_num": "4.3"
            },
            {
                "text": "come to an end, there is an increase again, as would be consistent with a rise in formality of the conversational speech between the partners.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Figure 5. Three measures of spectral energy provide an indication of spectral slope. The left-hand plot shows average energy measured between 0-1.5kHz, the middle plot the difference between that and average energy measured between 1.5kHz and 4kHz, and the right-hand plot shows the difference between the averaged mid-band energy and the averaged energy between 4kHz and 8kHz at the top end of the spectrum. Measures are plotted separately by interlocutor.",
                "sec_num": null
            },
            {
                "text": "In the lower plots, with the non-native speaker partners, there is a similar steeepness in high-frequency dropoff (consistent with increased politeness in the voice and speaking style)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Figure 5. Three measures of spectral energy provide an indication of spectral slope. The left-hand plot shows average energy measured between 0-1.5kHz, the middle plot the difference between that and average energy measured between 1.5kHz and 4kHz, and the right-hand plot shows the difference between the averaged mid-band energy and the averaged energy between 4kHz and 8kHz at the top end of the spectrum. Measures are plotted separately by interlocutor.",
                "sec_num": null
            },
            {
                "text": "in the initial and final recordings, and a gradual relaxation of spectral tilt in conversations of the interim period. Steeper spectral slope is found in the conversations with the female partners, with the Chinese female being highest and the English male being lowest in this respect.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Figure 5. Three measures of spectral energy provide an indication of spectral slope. The left-hand plot shows average energy measured between 0-1.5kHz, the middle plot the difference between that and average energy measured between 1.5kHz and 4kHz, and the right-hand plot shows the difference between the averaged mid-band energy and the averaged energy between 4kHz and 8kHz at the top end of the spectrum. Measures are plotted separately by interlocutor.",
                "sec_num": null
            },
            {
                "text": "In this analysis of the prosodic characteristics of the conversational speech of one Japanese male over a period of three months, considerable variation was found in all of the parameters measured. By factoring the analysis according to differences in interlocutor as well as by differences in time, or sequence of the conversations, we were able to show that the changes are not a result of time-related changes, such as tiredness or ill-health, but that they correlate more with differences in interlocutor and with development of the individual relationships.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5."
            },
            {
                "text": "It is probable that not all interlocutors were related to in an equal way. One can imagine more sharing of common interests between native speakers of the same language, and different forms of bonding in the relationships that developed between the male and the female partners respectively. Similarly, the culturally closer, Asian but foreign, Chinese partners and the possibly exotic, and maybe more foreign, English speakers would have brought different contributions and cultural assumptions to the conversations. Their necessary lack of fluency in the use of Japanese, particularly over the telephone where the visual support for communication is impaired, would have introduced idiosyncracies into the style of the different conversations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5."
            },
            {
                "text": "Without a complementary analysis of the texts of the conversations, one can only draw speculative conclusions to explain the differences in the prosodic characteristics, but from the spectra of speech with partner JMB in Figure 6 it can be assumed that the initial relatively low spectral energy and high spectral tilt of conversation J01 represent the 'baseline' settings for speaker JMA who had no expectations at that time about his partners. One might then",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 221,
                        "end": 229,
                        "text": "Figure 6",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5."
            },
            {
                "text": "speculate that the apparent increase in politeness (as indicated by a more breathy speaking style) in conversations II and III could be due to having to maintain a conversation for a long 30-minutes over the telephone with a partner who is still relatively unknown to the speaker, and that the decrease thereafter occurred as they found more interests in common to talk about.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
            {
                "text": "From a brief examination of the transcriptions, they certainly appear to have become friends over the three month period. If so, then perhaps one can also speculate that the increase of breathiness in their speech towards the end is indeed due to the approaching termination of their telephone relationship.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Showing the Effects of Affect in Conversational Speech",
                "sec_num": null
            },
            {
                "text": "In light of the recent considerable interest in the processing of affect in spoken interactions, an analysis was performed of some corpus data of conversational speech, showing that the four prosodic characteristics, duration, pitch, power, and voicing all vary significantly according to interlocutor differences and to differences in familiarity and politeness over a fixed period of time with the same interlocutor.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6."
            },
            {
                "text": "The results showed significant differences in the prosodic characteristics of speech with others sharing the same native language as compared with those of non-native speakers of Japanese. The results also showed that speaking rate, pitch range, and spectral tilt varied significantly according to partner and position of the conversation in the three-month series.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6."
            },
            {
                "text": "Because different settings were used with different partners at the same time, the possibility can be discounted that these differences were due to unrelated external considerations such as variation in the health of the speaker.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6."
            },
            {
                "text": "The findings reported earlier for similar changes in phonation settings for a female speaker from a separate section of the corpus (see [Campbell 2005; Campbell and Mokhtari 2003 ]) under more varied conversational settings have been replicated here with data from a different speaker in a more controlled recording environment.",
                "cite_spans": [
                    {
                        "start": 136,
                        "end": 151,
                        "text": "[Campbell 2005;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 152,
                        "end": 178,
                        "text": "Campbell and Mokhtari 2003",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6."
            },
            {
                "text": "It is perhaps still too early to make use of these findings in speech technology, and considerable further work is required before strong claims can be made about the causes and relationships, but it is of interest that these differences exist at all. Listeners certainly make use of small but consistent speaking-style and phonation-setting changes to make inferences about the affective states of the speaker. Perhaps these variations will provide the foundation for both speech synthesis and speech recognition modules that begin to incorporate affect as one of the strands of meaning in speech. Such technology would be of great use in providing a softer interface between machines and humans in society.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6."
            },
            {
                "text": "For example, in Japanese, 'ie' means \"house\" while 'iie' with a longer first vowel means \"no\". Similarly, 'ka' is an interrogative particle, while 'ka-' with a lengthened vowel means \"car\". Such length-based lexical distinctions are common.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "The generation of affect in synthesised speech",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Cahn",
                        "suffix": ""
                    }
                ],
                "year": 1989,
                "venue": "Journal of the American Voice I/O Society",
                "volume": "8",
                "issue": "",
                "pages": "251--256",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cahn, J., \"The generation of affect in synthesised speech, \" Journal of the American Voice I/O Society, Vol c8, 1989, pp. 251-256.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Getting to the heart of the matter; speech as expression of affect rather than just text or language",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Campbell",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Language Resources & Evaluation",
                "volume": "39",
                "issue": "1",
                "pages": "109--118",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Campbell, N., \"Getting to the heart of the matter; speech as expression of affect rather than just text or language,\" Language Resources & Evaluation, 39(1), Springer, 2005, pp. 109-118.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Conversational Speech Synthesis and the Need for Some Laughter",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Campbell",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "IEEE Transactions on Audio, Speech, and Language Processing",
                "volume": "14",
                "issue": "4",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Campbell, N., \"Conversational Speech Synthesis and the Need for Some Laughter, \" IEEE Transactions on Audio, Speech, and Language Processing, 14(4), July 2006.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A Language-Resources Approach to Emotion: Corpora for the Analysis of Expressive Speech",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Campbell",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proc International Conference on Language Resources and Evaluation, LREC",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Campbell, N., \"A Language-Resources Approach to Emotion: Corpora for the Analysis of Expressive Speech,\" In Proc International Conference on Language Resources and Evaluation, LREC 2006.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Resources for the Processing of Affect in Interactions, \" Panel session",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Campbell",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Devillers",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Douglas-Cowie",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Auberge",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Batliner",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Tao",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proc LREC'06",
                "volume": "",
                "issue": "",
                "pages": "xxiv--xxvii",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Campbell, N., L. Devillers, E. Douglas-Cowie, V. Auberge, A. Batliner, and J. Tao, \"Resources for the Processing of Affect in Interactions, \" Panel session, In Proc LREC'06, Genoa, Italy, 2006, pp. xxiv-xxvii.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Voice Quality; the 4th prosodic parameter",
                "authors": [
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Campbell",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Mokhtari",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proc 15th ICPhS",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Campbell, N., and P. Mokhtari, \"Voice Quality; the 4th prosodic parameter, \" In Proc 15th ICPhS, Barcelona, Spain, 2003.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Beyond emotion archetypes; Databases for emotion modelling using neural networks",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Cowie",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Douglas-Cowie",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Cox",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "In Neural Networks",
                "volume": "18",
                "issue": "",
                "pages": "371--388",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cowie, R., E. Douglas-Cowie, and C. Cox, \"Beyond emotion archetypes; Databases for emotion modelling using neural networks, \" In Neural Networks 18, 2005, pp. 371-388.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Some problems in voice source analysis",
                "authors": [
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Fant",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Speech Communication",
                "volume": "13",
                "issue": "1",
                "pages": "7--22",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Fant, G., \"Some problems in voice source analysis,\" Speech Communication, 13(1), 1993, pp. 7-22.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Spectral correlates of glottal voice source waveform characteristics",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Gauffin",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Sundberg",
                        "suffix": ""
                    }
                ],
                "year": 1989,
                "venue": "Journal of Speech and Hearing Research",
                "volume": "32",
                "issue": "",
                "pages": "556--565",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gauffin, J., and J. Sundberg, ''Spectral correlates of glottal voice source waveform characteristics,\" Journal of Speech and Hearing Research, 32, 1989, pp. 556-565.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Using discourse content to guide pitch accent decisions in synthetic speech",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hirschberg",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "367--376",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hirschberg, J., ''Using discourse content to guide pitch accent decisions in synthetic speech, \" In G. Bailly and C. Benoit, ed, Talking Machines, North-Holland, 1992, pp. 367-376.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Acoustic and prosodic cues to speaking style in spontaneous and read speech",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Hirschberg",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Symposium on speaking styles",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hirschberg, J., ''Acoustic and prosodic cues to speaking style in spontaneous and read speech,\" In Symposium on speaking styles, Proc ICPhS , Stockholm, Sweden. 1995.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Vocal Communication of Emotion",
                "authors": [
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Johnstone",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [
                            "R"
                        ],
                        "last": "Scherer",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Handbook of Emotion",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Johnstone, T. and K. R. Scherer, \"Vocal Communication of Emotion,\" in: M. Lewis & J. Haviland (Eds.) Handbook of Emotion (2nd ed.). New York: Guildford. 2000.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "The classification of different phonation types in emotional and neutral speech",
                "authors": [
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Klasmeyer",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [
                            "F"
                        ],
                        "last": "Sendlmeier",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Forensic Linguistics",
                "volume": "4",
                "issue": "1",
                "pages": "104--124",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Klasmeyer, G., and W. F. Sendlmeier, \"The classification of different phonation types in emotional and neutral speech,\" Forensic Linguistics, 4(1), 1997, pp. 104-124.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Explaining phonetic variation: A sketch of the H&H theory",
                "authors": [
                    {
                        "first": "B",
                        "middle": [
                            "E F"
                        ],
                        "last": "Lindblom",
                        "suffix": ""
                    }
                ],
                "year": 1990,
                "venue": "Speech Producstion and Speech Modelling, NATO-ASI Series D: Behavioural and Social Sciences",
                "volume": "55",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lindblom, B. E. F., ''Explaining phonetic variation: A sketch of the H&H theory, \" In Speech Producstion and Speech Modelling, NATO-ASI Series D: Behavioural and Social Sciences, edited by H. J. Hardcastle and A. Marchal (Kluwer, Dordrecht), Vol 55, 1990.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Dimensional emotion representation as a basis for speech synthesis with non-extreme emotions",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Schroeder",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proc. Workshop on Affective Dialogue Systems: Lecture Notes in Computer Science",
                "volume": "",
                "issue": "",
                "pages": "209--220",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Schroeder, M., \"Dimensional emotion representation as a basis for speech synthesis with non-extreme emotions,\" In Proc. Workshop on Affective Dialogue Systems: Lecture Notes in Computer Science, Kloster Irsee, Germany, 2004, pp. 209-220.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Spectral tilt as a clue for linguistic stress",
                "authors": [
                    {
                        "first": "A",
                        "middle": [
                            "M C"
                        ],
                        "last": "Sluijter",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [
                            "J"
                        ],
                        "last": "Van Heuven",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sluijter, A. M. C., and V. J. van Heuven, \"Spectral tilt as a clue for linguistic stress, \" presented at 127th ASA, Cambridge, MA. 1994.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "An Introduction to Spoken Interaction. Longman, London",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Stenstr\u00f6m",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Stenstr\u00f6m, A., An Introduction to Spoken Interaction. Longman, London. 1994.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "Plots of mean, maximum and minimum f0 values observed in the data of each of the interlocutors. The box-plots show median and interquartile values, with whiskers extending to 1.5 times the interquartile range. All F0 measurements are converted to their log values for ease of comparison. Plots of mean, maximum and minimum rms amplitude (speech signalpower) values for each of the interlocutors.",
                "num": null,
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF1": {
                "text": "plots f0 range for comparison with power range across the same set of partners.It shows a slightly higher range of f0 activity when this subject is talking with the English female than with the Chinese male partner. Both plots are log-scaled and show an average of Showing the Effects of Affect in Conversational Speech 55Hz ( (4) exp ) median pitch range with a 30dB average power range when conversing with these different interlocutors. Power is noticeably higher when talking with Japanese native-speaker interlocutors, and lowest when talking with the female Chinese native-speaker.Needless to say, microphone distances (head-mounted) and record-level settings remained unchanged across all recordings. Ranges of fundamental frequency and power measurements for the affective utterances, plotted by interlocutor. As above, C, E, J stand for Chinese, English, Japanese respectively, and F, M represent female and male interlocutors. Both f0 and power are plotted as log values.",
                "num": null,
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF2": {
                "text": "shows three measures of averaged spectral energy for the 11,750 affective utterances under examination. The low-frequency part of the spectrum is shown measured in decibels, as is customary in plots of spectral sections, but the middle and right-hand plots",
                "num": null,
                "uris": null,
                "type_str": "figure"
            },
            "FIGREF3": {
                "text": "shows differences in these values over time. The upper three plots show low-band, mid-band, and high-band energy measures for conversations with Japanese male partner JMB. The lower part of the figure shows only the high-band energy differences (spectral slope measures) for the four non-native partners. In each case there is a general trend towards decrease in steepness of the spectral slope with time. From the top plots (of the series of conversations with partner JMB), the second, third and penultimate conversation exhibited high low-frequency spectral energy (from the left-hand plot), steep falloff in mid-band energy (from the central plot), and, at least for the Spectral slope differences across time by interlocutor. The top part shows all three spectral bands for partner JMB, and the bottom part shows the difference between mid-band and high-band energy for each of the non-native partners.",
                "num": null,
                "uris": null,
                "type_str": "figure"
            },
            "TABREF1": {
                "text": "",
                "content": "<table><tr><td>a</td><td>a-</td><td>a-</td><td>a-</td><td>a.a-</td><td>a-.hai</td></tr><tr><td>296</td><td>368</td><td>693</td><td>608</td><td>390</td><td>386</td></tr><tr><td>a.hai</td><td>a-.n</td><td>ano</td><td>ano-</td><td>a!!</td><td>demo</td></tr><tr><td>577</td><td>368</td><td>337</td><td>494</td><td>927</td><td>272</td></tr><tr><td>e-</td><td>e-</td><td>ee</td><td>fun</td><td>fu-n</td><td>fu-n</td></tr><tr><td>665</td><td>254</td><td>2679</td><td>642</td><td>625</td><td>273</td></tr><tr><td>ha.ai</td><td>hai</td><td>ha-i</td><td>hai.hai.hai</td><td>n(umm)</td><td>n-</td></tr><tr><td>978</td><td>7295</td><td>1657</td><td>378</td><td>265</td><td>456</td></tr><tr><td>n-</td><td>nanka</td><td>ne-</td><td>nee</td><td>@S</td><td>sou</td></tr><tr><td>410</td><td>273</td><td>367</td><td>284</td><td>3382</td><td>810</td></tr><tr><td>su-</td><td>su-</td><td>un</td><td>u-n</td><td>u-n</td><td>u-n</td></tr><tr><td>429</td><td>296</td><td>3717</td><td>2401</td><td>1243</td><td>333</td></tr><tr><td>un.un</td><td>@W</td><td>zu-</td><td>zu-</td><td/><td/></tr><tr><td>351</td><td>3041</td><td>1348</td><td>467</td><td/><td/></tr></table>",
                "num": null,
                "type_str": "table",
                "html": null
            },
            "TABREF2": {
                "text": "",
                "content": "<table><tr><td/><td>0%</td><td>25%</td><td>50%</td><td>75%</td><td>100%</td></tr><tr><td>low-band</td><td>-72</td><td>-48</td><td>-46</td><td>-44</td><td>-21</td></tr><tr><td>mid-band</td><td>-11</td><td>12</td><td>14</td><td>16</td><td>38</td></tr><tr><td>high-band</td><td>-18</td><td>7</td><td>9</td><td>11</td><td>37</td></tr><tr><td>s1</td><td/><td>s2</td><td/><td/><td>s3</td></tr></table>",
                "num": null,
                "type_str": "table",
                "html": null
            }
        }
    }
}