{
"paper_id": "O07-3003",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:08:19.000049Z"
},
"title": "Affective Intonation-Modeling for Mandarin Based on PCA",
"authors": [
{
"first": "Zhuangluan",
"middle": [],
"last": "Su",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Science and Technology of China",
"location": {
"postCode": "230027",
"settlement": "Hefei",
"country": "China"
}
},
"email": ""
},
{
"first": "Zengfu",
"middle": [],
"last": "Wang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Science and Technology of China",
"location": {
"postCode": "230027",
"settlement": "Hefei",
"country": "China"
}
},
"email": "zfwang@ustc.edu.cn"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "The speech fundamental frequency (henceforth F0) contour plays an important role in expressing the affective information of an utterance. The most popular F0 modeling approaches mainly use the concept of separating the F0 contour into a global trend and local variation. For Mandarin, the global trend of the F0 contour is caused by the speaker's mood and emotion. In this paper, the authors address the problem of affective intonation. For modeling affective intonation, an affective corpus has been designed and established, and all intonations are extracted with an iterative algorithm. Then, the concept of eigen-intonation is proposed based on the technique of Principal Component Analysis on the affective corpus and all the intonations are transformed to the lower-dimensional eigen sub-space spanned by eigen-intonations. A model of affective intonations is established in the sub-space. As a result, the corresponding emotion (maybe a mixed emotion) can be expressed by speech whose intonation is modified according to the above model. The experiments are performed with the affective Mandarin corpus, and the experimental results show that the intonation modeling approach proposed in this paper is efficient for both intonation representation and speech synthesis.",
"pdf_parse": {
"paper_id": "O07-3003",
"_pdf_hash": "",
"abstract": [
{
"text": "The speech fundamental frequency (henceforth F0) contour plays an important role in expressing the affective information of an utterance. The most popular F0 modeling approaches mainly use the concept of separating the F0 contour into a global trend and local variation. For Mandarin, the global trend of the F0 contour is caused by the speaker's mood and emotion. In this paper, the authors address the problem of affective intonation. For modeling affective intonation, an affective corpus has been designed and established, and all intonations are extracted with an iterative algorithm. Then, the concept of eigen-intonation is proposed based on the technique of Principal Component Analysis on the affective corpus and all the intonations are transformed to the lower-dimensional eigen sub-space spanned by eigen-intonations. A model of affective intonations is established in the sub-space. As a result, the corresponding emotion (maybe a mixed emotion) can be expressed by speech whose intonation is modified according to the above model. The experiments are performed with the affective Mandarin corpus, and the experimental results show that the intonation modeling approach proposed in this paper is efficient for both intonation representation and speech synthesis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Speech can convey not only literal meanings, but also the mood and emotion of a speaker. Some researchers have proven that the contour of the speech fundamental frequency (henceforth F0 contour) plays an important role in expressing the affective information of an utterance. It is concluded that some statistical characteristics of F0 play the most important roles in emotion perception [Tao and Kang 2005] . Especially, F0 contours differ from each other because of the speaker's different emotion in Mandarin [Yuan et al. 2002] . Due to significance of F0, the F0 contour modeling is one of the key issues that should be addressed.",
"cite_spans": [
{
"start": 388,
"end": 407,
"text": "[Tao and Kang 2005]",
"ref_id": "BIBREF8"
},
{
"start": 512,
"end": 530,
"text": "[Yuan et al. 2002]",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The most popular F0 modeling approaches mainly use the concept of separating the F0 contour into a global trend and local variation [Abe and Sato 1992; Bellegarda et al, 2001] . Mandarin is a tonal language including four basic tone types and a so-called 'light' tone. The F0 contour is composed of three elements [Zhao 1980] : the tone of the syllable, the variety of tone in continuous utterance, and the movement influenced by mood. How to extract tones and intonations from speech is a difficult problem. Tian and Nurminen have proposed a data-driven tone modeling approach to describe the tonal element [Tian and Nurminen 2004] . In previous work [Su and Wang 2005] , the authors of this paper also proposed an affective-tone modeling approach for Mandarin to separate F0 contour into two elements: variational tones based on syllables and intonations for prosody phrases.",
"cite_spans": [
{
"start": 132,
"end": 151,
"text": "[Abe and Sato 1992;",
"ref_id": "BIBREF0"
},
{
"start": 152,
"end": 175,
"text": "Bellegarda et al, 2001]",
"ref_id": "BIBREF2"
},
{
"start": 314,
"end": 325,
"text": "[Zhao 1980]",
"ref_id": null
},
{
"start": 608,
"end": 632,
"text": "[Tian and Nurminen 2004]",
"ref_id": "BIBREF9"
},
{
"start": 652,
"end": 670,
"text": "[Su and Wang 2005]",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "In this paper, the authors propose a data-driven intonation modeling approach based on Principal Component Analysis (henceforth, PCA [Fukunaga 2000] ). For modeling affective intonations, an affective corpus of Mandarin has been designed and the corresponding intonations are extracted with an iterative algorithm from the original speech. The eigen-intonation concept is proposed based on the principal components of the above intonations obtained from the affective corpus, and all the intonations are then transformed into the sub-space spanned by the eigen-intonations. The distribution of affective intonations corresponding to an emotion in the above sub-space is a help to establish the corresponding affective intonation model. As a result, speech whose intonation is modified according to the model can express the corresponding emotion, even mixed emotions. In addition, the authors will also show emotion perception results using the proposed modeling approach.",
"cite_spans": [
{
"start": 133,
"end": 148,
"text": "[Fukunaga 2000]",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "The remainder of the paper is organized as follows. The speech corpus and some statistic results of F0 based on the database are described first. Then, the algorithm of eigen-intonation extraction is described, and some of the basic properties of the eigen-intonation representation are concluded. Next, how to model the affective intonation is discussed. Last, the performance of the proposed modeling approach is given by experimental results.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1."
},
{
"text": "Carrying on the affective speech research, a reasonable classification of the emotion is needed first, and then the speech features with different emotions can be analyzed effectively. In emotional psychology, Robert Plutchik proposed a four pair emotional ring constructed of eight pure emotions, including anger, joy, acceptance, surprise, fear, sadness, hatred and expectation. In the affective speech research for Mandarin, four emotions are generally selected, either including anger, joy, fear, sadness [Yuan et al. 2002; Tao and Kang 2005] , or including anger, joy, surprise and sadness [Zhao et al. 2004] . In contrast, five emotions are selected for this paper, and they are anger, joy, surprise, fear and sadness.",
"cite_spans": [
{
"start": 509,
"end": 527,
"text": "[Yuan et al. 2002;",
"ref_id": "BIBREF10"
},
{
"start": 528,
"end": 546,
"text": "Tao and Kang 2005]",
"ref_id": "BIBREF8"
},
{
"start": 595,
"end": 613,
"text": "[Zhao et al. 2004]",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Speech Corpus and Statistic Results of F0",
"sec_num": "2."
},
{
"text": "What is discussed in this paper is the global variety of the F0 contour, so a reasonable duration of the target needs to be considered. Due to the multi-level structure of prosody [Abney 1995; Li et al. 2000] , a complicated sentence with many syllables can be divided into several simple prosody units with fewer syllables at prosody boundaries. So, studying intonation based on prosody units can transform this complicated problem into several simple ones. Moreover, it is known that prosodic phrases can keep a relatively stable intonation pattern. Therefore, the authors model intonation based on prosodic phrases in the paper.",
"cite_spans": [
{
"start": 180,
"end": 192,
"text": "[Abney 1995;",
"ref_id": "BIBREF1"
},
{
"start": 193,
"end": 208,
"text": "Li et al. 2000]",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Speech Corpus and Statistic Results of F0",
"sec_num": "2."
},
{
"text": "It is known that F0 contour is influenced by several factors, including syntax, stress, speaker's emotion and his or her individual character. This paper focuses on the movement of intonation caused by emotion, and the influence of other factors such as syntax, stress, and the individual characters will not be considered. Currently, there are no effective methods that can eliminate the influence of these factors from the original speech signals directly, so the corpus used in the paper are obtained in such a way as to avoid these interferential factors' influence.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Speech Corpus and Statistic Results of F0",
"sec_num": "2."
},
{
"text": "To avoid unwanted factors' influence and to simplify the following processing, the corpus is designed with some limitations. The authors have designed 40 sentences with different literal contents for the following test, and each sentence only consists of three components: subject, verb, and object. Furthermore, the subject, verb, and the object are all designed to be disyllabic words. So, each sentence only has 6 syllables in this case, and all of these sentences have the same syntax. As the length of a prosodic phrase is approximately six syllables [Zhao et al. 2002] , each sentence consists of only one prosodic phrase. An example of such a sentence is given by \"\uf963\u4eac\u53ec\u5f00\u5965\u8fd0\". This design can be advantageous to the following experiments, and the model will be established directly based on one sentence. Each sentence is then performed by a female actor with all six emotions, including fear, sadness, neutral, anger, joy and surprise. In the end, the corpus used for analysis contains 240 total sentences, consisting of 1,440 syllables from a single speaker, with same syntax and the same individual characters. The speech signals are digitized at 16 kHz with 16-bit precision.",
"cite_spans": [
{
"start": 556,
"end": 574,
"text": "[Zhao et al. 2002]",
"ref_id": "BIBREF12"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Speech Corpus and Statistic Results of F0",
"sec_num": "2."
},
{
"text": "To evaluate the representational ability of the corpus, some experiments about the distributions of F0 are performed. Here, the F0 of a speech is extracted by using a modified autocorrelation algorithm. The results are demonstrated in Figure 1 . Figure 1 shows that \"surprise\", \"happy\" and \"angry\" make a very high F0, while \"sad\" generates lower value than the neutral state. It can also be found that the varying range of \"sad\" is smaller than the others. F0 parameters of \"fear\" make quite similar behaviors as \"sad\". \"Angry\", \"happy\", and \"surprise\" also behave similarly. All of the results accord with the conclusions given by other researches [Yuan et al. 2002; Zhao et al. 2004; Tao and Kang 2005] . So the speech corpus is representational and effective for the following analysis.",
"cite_spans": [
{
"start": 650,
"end": 668,
"text": "[Yuan et al. 2002;",
"ref_id": "BIBREF10"
},
{
"start": 669,
"end": 686,
"text": "Zhao et al. 2004;",
"ref_id": "BIBREF11"
},
{
"start": 687,
"end": 705,
"text": "Tao and Kang 2005]",
"ref_id": "BIBREF8"
}
],
"ref_spans": [
{
"start": 235,
"end": 243,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 246,
"end": 254,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Speech Corpus and Statistic Results of F0",
"sec_num": "2."
},
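A minimal sketch (not the authors' code) of the per-emotion F0 statistics summarized in Figure 1. The input structure f0_tracks and the statistics chosen (mean, standard deviation, range) are assumptions for illustration; the paper reports its results graphically.

```python
# Sketch: per-emotion F0 statistics over the corpus, assuming f0_tracks maps an
# emotion label to a list of per-utterance F0 arrays in Hz (0 = unvoiced frame).
import numpy as np

def f0_statistics(f0_tracks):
    """Return mean, standard deviation, and range of voiced F0 per emotion."""
    stats = {}
    for emotion, tracks in f0_tracks.items():
        f0 = np.concatenate([t[t > 0] for t in tracks])  # keep voiced frames only
        stats[emotion] = {
            "mean": float(f0.mean()),
            "std": float(f0.std()),
            "range": float(f0.max() - f0.min()),
        }
    return stats
```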
{
"text": "The affective intonation will be modeled with a concept called \"eigen-intonation\". The concept of eigen-intonation is derived through the use of the PCA technique. PCA [Fukunaga 2000 ] is a multivariate analysis method that carries out a compact description of a data set. In a PCA process, a set of correlated variables is transformed into a set of uncorrelated variables that are ordered by reducing variability, and these new uncorrelated variables are linear combinations of the original variables. It can be concluded that the first new variable contains the greatest amount of variation; the second contains the next greatest residual variance and orthogonal to the first, and so on. Thus, the last of these variables can be removed with a minimal loss of real data.",
"cite_spans": [
{
"start": 168,
"end": 182,
"text": "[Fukunaga 2000",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Concept of Eigen-Intonation",
"sec_num": "3."
},
{
"text": "With the affective corpus in the paper, the speech intonations for sentences should be very similar in all configurations, and they should be able to be described by some \"basic intonations\". From the previous description, one knows that one of the main functions of PCA is that it can be used to extract new uncorrelated features from original data. According to these ideas, one can find the \"basic intonations\" that best account for distribution of speech intonations within the entire intonation space using the principal components analysis. The \"basic intonations\" are called \"eigen-intonations\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Concept of Eigen-Intonation",
"sec_num": "3."
},
{
"text": "With eigen-intonation, original intonations can be transformed to corresponding representations with lower dimensions. Some rules can also be possibly given out in the low-dimensional space. Moreover, the resultant rules with low dimensions have simpler expression, and it is advantageous to control the rules for the goal of this study. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Concept of Eigen-Intonation",
"sec_num": "3."
},
{
"text": "The concept of eigen-intonation is proposed based on PCA technique. Mathematically, the principal component analysis involves an eigen analysis on a covariance matrix. A good low-dimensional representation in the space of possible speech intonations can be achieved by considering only a few principal components or eigenvectors, corresponding to the first largest eigenvalues.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Analysis for Eigen-Intonation",
"sec_num": "4."
},
{
"text": "In order to obtain the intonation of a speech, the F0 contour of the speech should be extracted first. After that, the F0 contour will be separated into a global variety, which is regarded as intonation, and rapidly-varying components corresponding to local changes based on syllables. The details of intonation extraction are described in the following.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "The entire intonation extracting algorithm can be divided into five main steps: 1) Estimating initial F0 values based the modified normalized autocorrelation from voiced regions of the original speech.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "2) Cubic Hermite interpolating for unvoiced regions and obtaining a continuous F0 curve.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "3) Filtering the continuous F0 contour with two serial modified smoothing processes. 4) Applying piecewise three-order polynomial iterative fitting to the entire F0 contour, the n-th iterative processing step is as:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "(a) Fitting the entire F0 contour with n pieces of cubic polynomial.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "(b) Calculating the fitting error E n .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "(c) If E n < E t , ending the iterative algorithm and taking n pieces of cubic polynomial fitting as final resultant F0 contour. Else, n = n + 1, go to (a). Where E t is a given threshold of maximal fitting error.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "5) The ln(F0) contour is passed through a high-pass filter with a stop frequency at 0.5Hz, and the residual low frequency contour after filtering is denoted as L F contour.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
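As referenced in step 5, here is a minimal sketch of the iterative piecewise cubic fitting in step 4. The RMS error measure, the threshold value, and the equal-length segmentation are assumptions for illustration; the paper does not specify them.

```python
# Sketch of step 4: piecewise third-order polynomial iterative fitting of a
# continuous F0 contour (the output of steps 1-3), given as a 1-D NumPy array.
import numpy as np

def piecewise_cubic_fit(f0, n_pieces):
    """Fit the contour with n_pieces cubic polynomials over equal segments."""
    fit = np.empty_like(f0, dtype=float)
    bounds = np.linspace(0, len(f0), n_pieces + 1, dtype=int)
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        x = np.arange(lo, hi)
        fit[lo:hi] = np.polyval(np.polyfit(x, f0[lo:hi], 3), x)
    return fit

def iterative_fit(f0, E_t=2.0, max_pieces=20):
    """Increase the number of pieces n until the fitting error E_n < E_t."""
    for n in range(1, max_pieces + 1):
        fit = piecewise_cubic_fit(f0, n)
        E_n = np.sqrt(np.mean((f0 - fit) ** 2))  # RMS fitting error (assumed)
        if E_n < E_t:
            break
    return fit, n
```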
{
"text": "From the authors' previous work [Su and Wang 2005] , The L F contour can be regarded as the F0 global variety of a speech. As all sentences have the same syntax and each sentence consists of only one prosodic phrase in this corpus, the model can be established directly based on one sentence. It is to say that the resultant L F contour of the algorithm for each sentence in the corpus is the modeling target, intonation based prosodic phrase (henceforth intonation). Finally, each intonation is normalized into an N-dimensional vector (N = 100 in the paper).",
"cite_spans": [
{
"start": 32,
"end": 50,
"text": "[Su and Wang 2005]",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Extraction of Intonation",
"sec_num": "4.1"
},
{
"text": "Let the data set of intonations be I 1 , I 2 , \u2026 I M , where I i is an N-dimensional intonation sample, and M is the number of intonations (M = 240 in the paper). Then the intonation covariance",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "matrix C N \u00d7 N is computed by (1). 1 1 ( )( ) M T i i i C I m I m M = = \u2212 \u2212 \u2211",
"eq_num": "(1)"
}
],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "Where, m is the average intonation calculated by (2).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "1 1 M i i m I M = = \u2211 (2)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "The differential intonations matrix A is defined as (3).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "1 2 1 , ,..., M A I m I m I m M = \u2212 \u2212 \u2212",
"eq_num": "(3)"
}
],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "Then, C = AA T is an N \u00d7 N covariance matrix. The eigen analysis on the covariance matrix C N \u00d7 N yields a set of positive eigenvalues {\u03bb 1 , \u03bb 2 , \u2026, \u03bb N } in descending order and the",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "corresponding eigenvectors, {V 1 , V 2 , \u2026, V N ). The first L (L < N) eigenvectors, denoted as U = {V i , i = 1, 2,\u2026",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": ", L}, are selected as principal components, and the intonations corresponding to these L vectors are so-called eigen-intonations, denoted as U o .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
{
"text": "The eigen sub-space spanned by the principal components U is called sub-space of intonation, denoted as P, and the original space of intonation is denoted as O. All intonations in O can be projected to be the corresponding representations in P. It is known that the dimension of P is lower than that of O, and one can establish the rules of intonation in P and then restore the resultant intonations in O. Obviously, rules with lower dimension are easily controlled. Next, restoration of intonation will be discussed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "PCA for Intonation",
"sec_num": "4.2"
},
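A minimal sketch of the eigen-intonation analysis of Section 4.2, implementing equations (1)-(3) with NumPy. It assumes intonations is an M x N array (M = 240 samples, N = 100 points per normalized intonation); the function name is illustrative.

```python
# Sketch: compute the mean intonation, the covariance matrix, and the first L
# principal components (eigen-intonations) of the intonation data set.
import numpy as np

def eigen_intonations(intonations, L=6):
    M, N = intonations.shape
    m = intonations.mean(axis=0)              # average intonation, Eq. (2)
    A = (intonations - m).T / np.sqrt(M)      # N x M differential matrix, Eq. (3)
    C = A @ A.T                               # N x N covariance matrix, Eq. (1)
    eigvals, eigvecs = np.linalg.eigh(C)      # eigh returns ascending eigenvalues
    order = np.argsort(eigvals)[::-1]         # re-sort in descending order
    U = eigvecs[:, order[:L]]                 # first L principal components
    return U, m, eigvals[order]
```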
{
"text": "According to the principal component analysis, the original intonations in O are projected into the sub-space P as (4).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "( ), 1, 2, ..., T k k U I m k M \u2126 = \u2212 =",
"eq_num": "(4)"
}
],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "Where, \u2126 k is coordinate vector of the k-th intonation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "With \u2126, the intonation samples are restored as (5), and the final approximation of the original intonations I is given out as (6), denoted as J.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "B U = \u2126 (5) 1, 2, ..., k k J B m k M = + =",
"eq_num": "(6)"
}
],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "Especially, let B = U in (6), intonations corresponding to U can be given out, and that are eigen-intonations U o . It can be concluded that although U o is higher than U, the configuration of U o is same as U. So the authors do not distinguish them when their configurations are discussed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
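A minimal sketch of the projection and restoration in equations (4)-(6), reusing the U and m computed in the PCA step above; the function names and array shapes are illustrative assumptions.

```python
# Sketch: project intonations into the sub-space P and restore them in O.
import numpy as np

def project(U, m, intonations):
    """Eq. (4): Omega_k = U^T (I_k - m); returns an L x M coordinate matrix."""
    return U.T @ (intonations - m).T

def restore(U, m, Omega):
    """Eqs. (5)-(6): J_k = U Omega_k + m; returns an M x N approximation."""
    return (U @ Omega).T + m
```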
{
"text": "To evaluate the ability of restoration, the restoring rate for k-th intonation is defined as (7).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Restoration Based on PCA",
"sec_num": "4.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "k k k k I J R k M I \u2212 = \u2212 =",
"eq_num": "(7)"
}
],
"section": "1",
"sec_num": null
},
{
"text": "The final restoring rate of the entire algorithm is defined as (8).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "1",
"sec_num": null
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "1 / 100% M k k r R M = \u23a7 \u23ab = \u00d7 \u23a8 \u23ac \u23a9 \u23ad \u2211",
"eq_num": "(8)"
}
],
"section": "1",
"sec_num": null
},
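A minimal sketch of the restoring-rate evaluation in equations (7) and (8), assuming the Euclidean norm (the paper does not state which norm is used); the function name is illustrative.

```python
# Sketch: restoring rate of the approximation J against the original I (M x N).
import numpy as np

def restoring_rate(I, J):
    R = 1.0 - np.linalg.norm(I - J, axis=1) / np.linalg.norm(I, axis=1)  # Eq. (7)
    return float(R.mean() * 100.0)                                       # Eq. (8), percent
```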
{
"text": "Affective intonation is the concept that a speech with a certain affective intonation can express a corresponding emotion. Some works of speech prosody have proposed much qualitative analysis for affective intonations, and this paper will try to give quantitative affective intonation rules. At last, speech whose intonation is modified according to a certain affective intonation obtained in the paper can express the corresponding emotion.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "About Affective Intonation",
"sec_num": "5.1"
},
{
"text": "In order to research affective problems, emotion can be classified. Robert Plutchik [Plutchik 1960 ] considered that the emotions felt in normal human life were complicated and mixed, and considered some intensity of the eight pure emotions constructing a mixed emotion. So, in a similar way to him, all the mixed-emotional intonations are supposed to be defined by some vectors in the form of linear combination of the coefficients in the paper, where the vectors are the principal components U and the coefficient is the coordinate vector \u2126 k in (4). Based on this assumption, one can easily change the coefficient corresponding to a certain eigen-intonation to control some configuration of final affective intonation for the goal. How to perform the assumption is discussed in the following.",
"cite_spans": [
{
"start": 84,
"end": 98,
"text": "[Plutchik 1960",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "About Affective Intonation",
"sec_num": "5.1"
},
{
"text": "Let the set of emotions be a, a = 1, 2,\u2026,6 representing anger, joy, surprise, fear, sadness and neutral emotional state. Intonations extracted from the speeches with emotion a are denoted as N-dimensional vector I a in original space O. Let I = I a in (4), and I a be projected into the sub-space P, denoted as \u2126 a . \u2126 a is distributed in different regions in P for the different emotions a, and the mass kernel vectors \u03b1 \u2126 are computed as (9). Where T a are the final affective rule-intonations (henceforth rule-intonations) and they can be applied directly to modify the target intonation for synthesizing affective speech, which will be performed in the following experiments.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Modeling Affective Intonation",
"sec_num": "5.2"
},
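A minimal sketch of the rule-intonation computation. Reading the mass kernel of equation (9) as the centroid of each emotion's projecting vectors (consistent with the definition of K_a given alongside equation (9)), the rule-intonation of equation (10) is its restoration in the original space; names and shapes are assumptions.

```python
# Sketch: per-emotion mass kernels in sub-space P and rule-intonations in O.
import numpy as np

def rule_intonations(Omega, labels, U, m):
    """Omega: L x M coordinates; labels: length-M NumPy array of emotion ids 1..6."""
    rules = {}
    for a in np.unique(labels):
        kernel = Omega[:, labels == a].mean(axis=1)  # mass kernel, Eq. (9)
        rules[int(a)] = U @ kernel + m               # rule-intonation T_a, Eq. (10)
    return rules
```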
{
"text": "To demonstrate the eigen-intonations, a PCA experiment using the affective speech corpus was performed. The first six principal components U are shown in Figure 2 and the authors do not distinguish the principal components selected and eigen-intonations here. It can be seen that the varying range of the first component is the smallest, and it is also the highest. So the",
"cite_spans": [],
"ref_spans": [
{
"start": 154,
"end": 162,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Analysis on Eigen-Intonation",
"sec_num": "6.1"
},
{
"text": "first eigen-intonation represents the flat and positive pitch. The second eigen-intonation contributes a big rising component, and the third matches a falling intonation with a little rising at the end. The fourth can be viewed as adding a falling part to the end of the third. The varying ranges are same between the fifth and the sixth, and their global trends are flat with big rising and falling varying. These two can be viewed as adding a rising or a falling part to the end of the previous component. It will be known that the sixth component contains a very small contribution of energy or variance to the intonation contour in the following analysis.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 2. Eigen-intonation of the affective speech",
"sec_num": null
},
{
"text": "Based on the previous resultant eigen-intonations, the authors carry out the restoring experiment using L components selected, respectively considering L be 3, 4, 5 and 6. The results are shown in Table 1 . From Table 1 , it can be concluded that selecting five components is acceptable, but with six principal components, the restoring rate is 99.89% and the approximation error is almost equal to zero. The approximating examples are shown in Figure 3 . That means a good six-dimensional representation for the space of all speech intonations is achieved, and these eigen-intonations are very efficient for intonation representation.",
"cite_spans": [],
"ref_spans": [
{
"start": 197,
"end": 204,
"text": "Table 1",
"ref_id": "TABREF0"
},
{
"start": 212,
"end": 219,
"text": "Table 1",
"ref_id": "TABREF0"
},
{
"start": 445,
"end": 453,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Figure 2. Eigen-intonation of the affective speech",
"sec_num": null
},
{
"text": "The emotional state expressed by intonation of each affective speech in the corpus is known, and there are six categories of emotions, including the neutral state. And there are 40 speeches within each emotional state. According to Section 5.2, all affective intonations labeled with From Figure 4 , one can see that the kernel of surprise, job and anger is far from that of neutral, where the \"surprise\" is farthest and then \"angry\" is next. However, the \"fear\" almost distributes in the same region with \"sad\", and they can be distinguished from the neutral emotional region. In addition, it can be known from analysis on eigen-intonations that the last several weights corresponding to these three weights in the figure contain a very small contribution of energy or variance, so the difference of their distribution is not as clear as in Figure 4 . Now the projecting vectors \u2126 a in P of original intonations labeled with emotion are given out as well as the corresponding kernel vector \u03b1 \u2126 for each emotional state. By restoring with eigen-intonations, the kernel vectors are transformed as (10) into the original space, there they are regarded as rule-intonations. The rule-intonations representing emotion states are illustrated in Figure 5 . From the figure, one can see that the intonations of anger, job and surprise are high, where the variety of surprise is greatest. However, the \"fear\" is flat and low, similar to that of the \"sad\". All these qualitative results are in line with the previous works of other researchers. So the resultant rule-intonations are efficient for expressing emotions in theory. ",
"cite_spans": [],
"ref_spans": [
{
"start": 289,
"end": 297,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 842,
"end": 850,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 1239,
"end": 1247,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Modeling Affective Intonation",
"sec_num": "6.2"
},
{
"text": "When the affective rule-intonation was modeled with eigen-intonation in the previous sub-section, the emotion labeled in the corpus and expressed by resultant intonation was supposed to be pure. It is known that the emotions of humans felt in normal life are not always so simple, and they are usually mixed with several so-called pure emotions, whose intensities differ corresponding to constructing the different emotions. The experiment is performed as the following to explain that the modeling approach proposed with eigen-intonation is also effective for representing the mixed-emotional intonation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The Mixed-Emotional Intonation",
"sec_num": "6.3"
},
{
"text": "All affective intonations labeled with emotions have been projected into sub-space P and the distribution of first three weights of the projecting vector \u2126 a in P has been shown in Figure 4 . Now only the mass kernel of each emotional state, which is corresponding to the resultant rule-intonation, is represented in Figure 6 .",
"cite_spans": [],
"ref_spans": [
{
"start": 181,
"end": 189,
"text": "Figure 4",
"ref_id": "FIGREF3"
},
{
"start": 317,
"end": 325,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Mixed-Emotional Intonation",
"sec_num": "6.3"
},
{
"text": "Nine equal space points in line between the neutral kernel and the surprise kernel are selected and indicated in the figure. If the kernel explains pure emotions, then what the points selected explain are the mixed emotions. Along the arrow in Figure 6 , points at the starting vertex explain more neutral and those at the ending vertex explain more surprise. So the emotions expressed by the intonations correspond to these points transfer from neutral to surprise along the arrow and they are mixed. The mixed-emotional intonations corresponding to the selected-points are restored in original space and shown in the left of Figure 7 . It can be concluded from the figure that, along the arrow, the first rule-intonations can express more neutral and the last ones express more surprise and all of them express the mixed emotions.",
"cite_spans": [],
"ref_spans": [
{
"start": 244,
"end": 252,
"text": "Figure 6",
"ref_id": null
},
{
"start": 627,
"end": 635,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "The Mixed-Emotional Intonation",
"sec_num": "6.3"
},
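A minimal sketch of the interpolation experiment above: nine equally spaced interior points between two emotion kernels in P, each restored as a mixed-emotional intonation in O. Function and variable names are illustrative.

```python
# Sketch: mixed-emotional intonations between two kernels (e.g., neutral and surprise).
import numpy as np

def mixed_intonations(kernel_a, kernel_b, U, m, n_points=9):
    weights = np.linspace(0.0, 1.0, n_points + 2)[1:-1]  # interior points only
    return [U @ ((1.0 - w) * kernel_a + w * kernel_b) + m for w in weights]
```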
{
"text": "Another nine equal space points between the happy kernel and the surprise kernel are also selected and the same experiment is performed. The illustrations of the experiment are shown in Figure 6 and the right of Figure 7 .",
"cite_spans": [],
"ref_spans": [
{
"start": 186,
"end": 194,
"text": "Figure 6",
"ref_id": null
},
{
"start": 212,
"end": 220,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Figure 5. Affective rule-intonations T \u03b1",
"sec_num": null
},
{
"text": "The arrows in the figure indicate the gradual varying direction corresponding to that in sub-space showed in Figure 6 and each gradual changing curve is corresponding to one point selected in Figure 6 . Figure 7 show that the mixed-emotional intonation can be represented with eigen-intonation, so one can control the relative position of intonation-representation in the sub-space to explain the certain mixed-emotion felt in the usual human life. To sum up, the modeling approach proposed with eigen-intonation is effective for representing not only the simple emotional intonation but also the mixed-emotional intonation.",
"cite_spans": [],
"ref_spans": [
{
"start": 109,
"end": 117,
"text": "Figure 6",
"ref_id": null
},
{
"start": 192,
"end": 200,
"text": "Figure 6",
"ref_id": null
},
{
"start": 203,
"end": 211,
"text": "Figure 7",
"ref_id": null
}
],
"eq_spans": [],
"section": "Note:",
"sec_num": null
},
{
"text": "Based on the linear predictive coding technology [Quatieri 2004 ], the authors analyzed neutral speeches, modified their intonations with the six rule-intonations, respectively, and re-synthesized them. For example, the intonation of a neutral speech is modified to the surprise intonation, and the demonstration is shown as Figure 8 . In the figure, the top is the waveform of the neutral speech, and the bottom includes the original F0 contour, the original intonation, the modified intonation, and the resultant F0 contour of the neutral speech. Moreover, the intonation of an original surprise speech is also plotted in the bottom figure for contrast. Figure 8 shows that the modified intonation is similar to the original intonation of the surprise speech, and the resultant F0 contour is higher than expressing surprise.",
"cite_spans": [
{
"start": 49,
"end": 63,
"text": "[Quatieri 2004",
"ref_id": "BIBREF6"
}
],
"ref_spans": [
{
"start": 325,
"end": 333,
"text": "Figure 8",
"ref_id": null
},
{
"start": 656,
"end": 664,
"text": "Figure 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "Synthesis with Affective Intonation",
"sec_num": "6.4"
},
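A minimal sketch of the intonation-modification step, under the assumption that modification amounts to swapping the global trend of the ln(F0) contour while keeping the local (tonal) variation; the LPC analysis and re-synthesis are outside this sketch, and the resampling scheme is an assumption.

```python
# Sketch: replace the global trend of a neutral ln(F0) contour with a rule-intonation.
import numpy as np

def modify_intonation(ln_f0, old_intonation, rule_intonation):
    t = np.linspace(0.0, 1.0, len(ln_f0))
    old = np.interp(t, np.linspace(0.0, 1.0, len(old_intonation)), old_intonation)
    new = np.interp(t, np.linspace(0.0, 1.0, len(rule_intonation)), rule_intonation)
    return ln_f0 - old + new  # keep local variation, swap the global trend
```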
{
"text": "In the perception experiment, the listener was asked to judge the emotional state of the speech sound. The results show that, though it is difficult to distinguish anger from happy, and also can not point out whether the speech sounded closer to fear or sadness, it is easy to tell the emotional states such as joy, surprise, and fear of one speech. So one can conclude that the rule-intonations are almost corresponding to the emotional state and the eigen-intonation modeling method is efficient.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Figure 8. Illustration for modifying intonation with surprise rule-intonation",
"sec_num": null
},
{
"text": "The F0 contour plays an important role in expressing the affective information of an utterance, and the most popular F0 modeling approaches are mainly using the concept of separating the F0 contour into a global trend and local variation. Mandarin is a tonal language, and the global trend of F0 contour is caused by speaker's mood and emotion, which is focused on in this paper, and that is called affective intonation. Affective intonation is the concept that a speech with a certain affective intonation can express a corresponding emotion. Some works of speech prosody have proposed much qualitative analysis for affective intonations, and the paper has given out quantitative rule-intonation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7."
},
{
"text": "In order to establish the model of affective intonation, an affective corpus of Mandarin was obtained with some limitation for affective research goal and all intonations were extracted from the original speeches. Then the eigen-intonation concept was proposed basing PCA on the affective corpus and all the intonations were transformed to lower-dimensional representations in the eigen sub-space spanned by eigen-intonations. A model of affective intonations was established in the sub-space and then was restored in the original space of intonation to form the rule-intonations. As a result, speech whose intonation is modified according to a certain rule-intonation can express the corresponding emotion, even the mixed emotion.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7."
},
{
"text": "The authors have performed experiments with the affective Mandarin corpus. And the experimental results are in line with the theoretical analysis and the intonation modeling approach proposed is proved to be efficient for representing the simple emotional and mixed-emotional intonation. Future work will focus on how to accurately give out the boundaries of the pure emotional regions in sub-space with eigen-intonation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7."
}
],
"back_matter": [
{
"text": "This work is supported by Open Foundation of National Laboratory of Pattern Recognition, China. The authors would like to thank Dr. Tieniu Tan and Dr. Jianhua Tao for their help.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Two-stage F0 control model using syllable based F0 units",
"authors": [
{
"first": "M",
"middle": [],
"last": "Abe",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Sato",
"suffix": ""
}
],
"year": 1992,
"venue": "Proceedings of International Conference on Acoustics, Speech, and Signal Processing",
"volume": "",
"issue": "",
"pages": "53--56",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Abe, M., and H. Sato, \"Two-stage F0 control model using syllable based F0 units,\" in Proceedings of International Conference on Acoustics, Speech, and Signal Processing, San Francisco, USA, 1992\uff0cpp.53-56.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Chunks and dependencies: Bringing processing evidence to bear on syntax",
"authors": [
{
"first": "S",
"middle": [],
"last": "Abney",
"suffix": ""
}
],
"year": 1995,
"venue": "Computational Linguistics and the Foundations of Linguistic Theory",
"volume": "",
"issue": "",
"pages": "145--164",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Abney, S., \"Chunks and dependencies: Bringing processing evidence to bear on syntax,\" in Jennifer Cole and Georgia Green and Jerry Morgan(Eds.): Computational Linguistics and the Foundations of Linguistic Theory, pp. 145-164, CSLI, 1995.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Statistical prosodic modeling: from corpus design to parameter estimation",
"authors": [
{
"first": "J",
"middle": [],
"last": "Bellegarda",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Silverman",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Lenzo",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Anderson",
"suffix": ""
}
],
"year": 2001,
"venue": "IEEE Trans. Speech and Audio Processing",
"volume": "9",
"issue": "1",
"pages": "52--66",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bellegarda, J., K. Silverman, K. Lenzo, and V. Anderson, \"Statistical prosodic modeling: from corpus design to parameter estimation,\" IEEE Trans. Speech and Audio Processing, 9(1), 2001, pp. 52-66.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Introduction to statistical pattern recognition",
"authors": [
{
"first": "K",
"middle": [],
"last": "Fukunaga",
"suffix": ""
}
],
"year": 2000,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Fukunaga, K., Introduction to statistical pattern recognition, Academic Press, Dordrecht, 2000.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Speech corpus of Chinese discourse and the phonetic research",
"authors": [
{
"first": "A",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "X",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Zu",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "W",
"middle": [],
"last": "Hua",
"suffix": ""
},
{
"first": "Z",
"middle": [],
"last": "Yin",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Yan",
"suffix": ""
}
],
"year": 2000,
"venue": "Proceedings of Sixth International Conference on Spoken Language Processing",
"volume": "",
"issue": "",
"pages": "13--18",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Li, A., M. Lin, X. Chen, Y. Zu, G. Sun, W. Hua, Z. Yin, and J. Yan, \"Speech corpus of Chinese discourse and the phonetic research,\" in Proceedings of Sixth International Conference on Spoken Language Processing, 2000, Beijing, China, pp. 13-18.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "The multifactor-analytic theory of emotion",
"authors": [
{
"first": "R",
"middle": [],
"last": "Plutchik",
"suffix": ""
}
],
"year": 1960,
"venue": "Journal of Psychology",
"volume": "50",
"issue": "",
"pages": "153--171",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Plutchik, R. \"The multifactor-analytic theory of emotion,\" Journal of Psychology, 50, 1960, pp. 153-171.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Discrete-Time Speech Signal Processing: Principles and Practice, House of Electronics Industry",
"authors": [
{
"first": "T",
"middle": [
"F"
],
"last": "Quatieri",
"suffix": ""
}
],
"year": 2004,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Quatieri, T. F., Discrete-Time Speech Signal Processing: Principles and Practice, House of Electronics Industry, Beijing, 2004.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "An Approach to Affective-Tone Modeling for Mandarin",
"authors": [
{
"first": "Z",
"middle": [],
"last": "Su",
"suffix": ""
},
{
"first": "Z",
"middle": [],
"last": "Wang",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "390--396",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Su, Z., and Z. Wang, \"An Approach to Affective-Tone Modeling for Mandarin,\" Lecture Notes in Computer Science 3784\uff0ced. By J. Tao, T. Tan, and R.W. Picard, Springer, 2005, pp. 390-396.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Features Importance Analysis for Emotional Speech Classification",
"authors": [
{
"first": "J",
"middle": [],
"last": "Tao",
"suffix": ""
},
{
"first": "Y",
"middle": [],
"last": "Kang",
"suffix": ""
}
],
"year": 2005,
"venue": "",
"volume": "",
"issue": "",
"pages": "449--457",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tao, J., and Y. Kang, \"Features Importance Analysis for Emotional Speech Classification,\" Lecture Notes in Computer Science 3784\uff0ced. By J. Tao, T. Tan, and R.W. Picard, Springer, 2005, pp. 449-457.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "On analysis of eigenpitch in Mandarin Chinese",
"authors": [
{
"first": "J",
"middle": [],
"last": "Tian",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Nurminen",
"suffix": ""
}
],
"year": 2004,
"venue": "Proceedings of 2004 International Symposium on Chinese Spoken Language Processing",
"volume": "",
"issue": "",
"pages": "89--92",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tian, J., and J. Nurminen, \"On analysis of eigenpitch in Mandarin Chinese,\" in Proceedings of 2004 International Symposium on Chinese Spoken Language Processing, 2004, Beijing, China, pp. 89-92.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "The acoustic realization of anger, fear, joy and sadness in Chinese",
"authors": [
{
"first": "J",
"middle": [],
"last": "Yuan",
"suffix": ""
},
{
"first": "L",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Chen",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of seventh International Conference on Spoken Language Processing",
"volume": "",
"issue": "",
"pages": "2025--2028",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yuan, J., L. Shen, and F. Chen, \"The acoustic realization of anger, fear, joy and sadness in Chinese,\" in Proceedings of seventh International Conference on Spoken Language Processing, 2002, Denver, Colorado, USA, pp. 2025-2028.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "A study on Emotional Feature Analysis and Recognition in Speech",
"authors": [
{
"first": "L",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Jiang",
"suffix": ""
},
{
"first": "C",
"middle": [],
"last": "Zou",
"suffix": ""
},
{
"first": "Z",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 2004,
"venue": "Acta Electronica Sinica",
"volume": "32",
"issue": "4",
"pages": "606--609",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhao, L., C. Jiang, C. Zou, and Z. Wu, \"A study on Emotional Feature Analysis and Recognition in Speech,\" Acta Electronica Sinica, 32(4), 2004, pp. 606-609.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Rule-learning Based Prosodic Structure Prediction",
"authors": [
{
"first": "S",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tao",
"suffix": ""
},
{
"first": "H",
"middle": [],
"last": "Cai",
"suffix": ""
}
],
"year": 2002,
"venue": "Journal of Chinese Information Processing",
"volume": "16",
"issue": "5",
"pages": "30--37",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zhao, S., J. Tao, and H. Cai, \"Rule-learning Based Prosodic Structure Prediction,\" Journal of Chinese Information Processing, 16(5) , 2002, pp. 30-37.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Statistic results for F0 with different emotional states",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF1": {
"text": "is the projecting representation vector (henceforth projecting vector) in P of the k-th intonation with emotion a. K a is the total number of all intonation samples with emotion a.{ \u03b1 \u2126 , a = 1, 2, \u2026, 6} are the resultant affective intonations with low dimension basing eigen-intonation. They are restored in the original intonation space O as (10",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF2": {
"text": "Illustration for restoring with eigen-intonationsdifferent emotions are projected into six-dimensional sub-space P spanned by eigen-intonations. The distribution of first three weights of the projecting vector \u2126 a is shown asFigure 4, and the mass kernel of each emotional state is indicated by red color in the figure.",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF3": {
"text": "Distribution of first 3 weights of affective intonations in eigen sub-space",
"uris": null,
"type_str": "figure",
"num": null
},
"FIGREF4": {
"text": "Intonations transferring corresponding to that in sub-space Transferring illustration of affective intonation in sub-spaceFigure 6 and",
"uris": null,
"type_str": "figure",
"num": null
},
"TABREF0": {
"text": "",
"type_str": "table",
"content": "<table><tr><td>L -component number</td><td>3</td><td>4</td><td>5</td><td>6</td></tr><tr><td>r -restoring rate</td><td>81.61%</td><td>95.71%</td><td>99.46%</td><td>99.89%</td></tr></table>",
"num": null,
"html": null
}
}
}
} |