{
    "paper_id": "2020",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T12:14:03.823999Z"
    },
    "title": "Vietnamese Text-To-Speech Shared Task VLSP 2020: Remaining problems with state-of-the-art techniques",
    "authors": [
        {
            "first": "Thi",
            "middle": [
                "Thu",
                "Trang"
            ],
            "last": "Nguyen",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Hanoi University of Science and Technology",
                "location": {
                    "settlement": "Hanoi",
                    "country": "Vietnam"
                }
            },
            "email": "trangntt@soict.hust.edu.vn"
        },
        {
            "first": "Nguyen",
            "middle": [],
            "last": "Hoang Ky",
            "suffix": "",
            "affiliation": {
                "laboratory": "R&D Lab Vbee Services and Data Processing Solution Jsc. Hanoi",
                "institution": "",
                "location": {
                    "country": "Vietnam"
                }
            },
            "email": ""
        },
        {
            "first": "Pham",
            "middle": [],
            "last": "Quang",
            "suffix": "",
            "affiliation": {
                "laboratory": "R&D Lab Vbee Services and Data Processing Solution Jsc. Hanoi",
                "institution": "",
                "location": {
                    "country": "Vietnam"
                }
            },
            "email": ""
        },
        {
            "first": "V",
            "middle": [
                "U"
            ],
            "last": "Duy Manh",
            "suffix": "",
            "affiliation": {
                "laboratory": "R&D Lab Vbee Services and Data Processing Solution Jsc. Hanoi",
                "institution": "",
                "location": {
                    "country": "Vietnam"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "and Technology (HUST). This was the third time we organized the Text-To-Speech shared task. In order to better understand different speech synthesis techniques on a common Vietnamese dataset, we conducted a challenge that helps us better compare research techniques in building corpusbased speech synthesizers. Participants were provided with a single training dataset including utterances and their corresponding texts. There are 7,770 utterances of a female Southwest professional speaker (about 9.5 hours). There is a total of 59 teams registered to participate in this shared task, and finally, 7 participants were evaluated online with perceptual tests. The best synthetic voice with Tacotron 2 and Hifigan vocoder with Waveglow denoiser achieved 89.3% compared to the human voice in terms of naturalness, i.e. 3.77 over 4.22 points on a 5-point MOS scale). Some reasons for a quite-big gap between the best synthetic voice with state-of-the-art synthetic techniques and the human voice were: (i) improper prosodic phrasing for long sentences and (ii) wrong/bad pronunciation for loan words.",
    "pdf_parse": {
        "paper_id": "2020",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "and Technology (HUST). This was the third time we organized the Text-To-Speech shared task. In order to better understand different speech synthesis techniques on a common Vietnamese dataset, we conducted a challenge that helps us better compare research techniques in building corpusbased speech synthesizers. Participants were provided with a single training dataset including utterances and their corresponding texts. There are 7,770 utterances of a female Southwest professional speaker (about 9.5 hours). There is a total of 59 teams registered to participate in this shared task, and finally, 7 participants were evaluated online with perceptual tests. The best synthetic voice with Tacotron 2 and Hifigan vocoder with Waveglow denoiser achieved 89.3% compared to the human voice in terms of naturalness, i.e. 3.77 over 4.22 points on a 5-point MOS scale). Some reasons for a quite-big gap between the best synthetic voice with state-of-the-art synthetic techniques and the human voice were: (i) improper prosodic phrasing for long sentences and (ii) wrong/bad pronunciation for loan words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Processing Consortium. It is an initiative to establish a community working on speech and text processing for the Vietnamese language [2] . The VLSP 2020 was the sixth annual international workshop. The Text-To-Speech (TTS) shared task was a challenge in the VLSP Campaign 2020, which was organized at Hanoi University of Science and Technology. This was the third time we organized the challenge in speech synthesis.",
                "cite_spans": [
                    {
                        "start": 134,
                        "end": 137,
                        "text": "[2]",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "I. INTRODUCTION VLSP stands for Vietnamese Language and Speech",
                "sec_num": null
            },
            {
                "text": "To the best of our knowledge, Vietnamese TTS systems can be divided into three main types:(i) Hidden Markov Model (HMM) based systems, (ii) Deep Neural Network (DNN) based systems, and (iii) state-of-the-art end-to-end systems. HMM-based TTS systems [6] [10] and DNN-based TTS systems [4] [9] need to provide pause position and loanword pronunciation in the text pre-processing step. Some end-to-end TTS systems, such as Tacotron [3] [11] , could use a massive amount of text and audio data pairs to learn prosody and loanword modeling directly from the TTS training process. Nevertheless, corpora do not always design to support that purpose.",
                "cite_spans": [
                    {
                        "start": 250,
                        "end": 253,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 254,
                        "end": 258,
                        "text": "[10]",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 285,
                        "end": 288,
                        "text": "[4]",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 289,
                        "end": 292,
                        "text": "[9]",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 430,
                        "end": 433,
                        "text": "[3]",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 434,
                        "end": 438,
                        "text": "[11]",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "I. INTRODUCTION VLSP stands for Vietnamese Language and Speech",
                "sec_num": null
            },
            {
                "text": "This shared task has been designed for understanding and figuring out remaining problems in Vietnamese TTS with state-of-the-art speech synthesis techniques on the same dataset. Based on some subjective feedback from listeners of the last year's TTS shared task, three main problems have been raising for this year: prosodic phrasing (mainly focusing on pause detection) [5] , text normalization (mainly focusing on loanwords) [6] [8] , and removing noise for Internet datasets.",
                "cite_spans": [
                    {
                        "start": 371,
                        "end": 374,
                        "text": "[5]",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 427,
                        "end": 430,
                        "text": "[6]",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 431,
                        "end": 434,
                        "text": "[8]",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "I. INTRODUCTION VLSP stands for Vietnamese Language and Speech",
                "sec_num": null
            },
            {
                "text": "Participants took the released speech dataset, build a synthetic voice from the data and submit the TTS system. We then synthesized a prescribed set of test sentences using each submitted TTS system. The synthesized utterances were then imported to an online evaluation system. Some perception tests were carried out to rank the synthesizers focusing on evaluating the intelligibility and the naturalness of participants' synthetic utterances.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "I. INTRODUCTION VLSP stands for Vietnamese Language and Speech",
                "sec_num": null
            },
            {
                "text": "The rest of this paper is organized as follows. Section II presents the common dataset and its preparation. Section III introduces participants and a complete process of the TTS shared task in VLSP Campaign 2020. We then show the evaluation design and experimental results in Section IV. We finally conclude the task and give some possible ideas for the next challenge in Section V.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "I. INTRODUCTION VLSP stands for Vietnamese Language and Speech",
                "sec_num": null
            },
            {
                "text": "The topic of this shared task is to address remaining problems of TTS systems using state-of-the-art synthesis techniques. Based on some analyses on the previous task results, aforementioned, we raised the following issues for this shared task: (i) prosodic phrasing (focusing on pause detection for long input sentences), (ii) text normalization (focusing on expanding loanwords), and (iii) removing background noises (of Internet audios).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "Due to the topic of this year's task, we decided to collect audiobooks from the Internet. Vbee Jsc supported to build the dataset for this task. The corpus was taken from a novel called \"Bell to Whom the Soul\" by Hemingway, a famous American novelist. Audio stories were downloaded manually, divided into 28 long audio files, each had 30 to 60 minutes in length. These files were then automatically split into smaller audio files that are less than 10 seconds in length (using Praat scripting tool). After this process, the number of sound files was up to nearly 20,000 sound files with different lengths.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "However, approximately 10,000 sound files that were too short in length (i.e. less than 750 ms) were discarded. Next, we used the ASR API of Vais Jsc to convert the remaining 10,000 audio files into text. These data were checked by the teams participating in the contest. Each team only had to check xxx files for participation. Finally, 7,770 best quality utterances and their corresponding texts were selected as the final dataset. Even though the speaker's voice was professional and pretty, the voice still contained some background noise due to the recording device's low quality.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "III. PARTICIPANTS For TTS shared task this year, participants had to follow a complete process (Fig. 1 ), which was managed in the website of the TTS shared task of VLSP Campain 2020 (https://tts.vlsp.org.vn).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 95,
                        "end": 102,
                        "text": "(Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "First, each team registered to participate in the challenge. They were then provided with accounts to log into. On this site, all teams were asked to check the audio files to see if they match the corresponding text and edit if necessary. If they found that the text was exactly the content of the audio, they voted for that transcription. Each audio file needs to be checked by at least 3 teams. Audio files that had no vote after the validation process, we had to check them manually. The participants who completed the required task were asked to send their user license agreement with valid signatures. They were then able to download the training dataset. The dataset includes utterances and their corresponding texts in a text file.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "Participants were asked to build only one synthetic voice from the released database. All teams had 20 days for training and optimizing their voices. Each team then submitted the result with a TTS API following the announced specification requirement. We also supported teams that could not deploy their TTS systems to a public server by accepting their docker images that contain the TTS API. We then synthesized audio files from the text files in the test dataset using teams' TTS API. Synthesized files will be evaluated. After receiving evaluation results, the teams proceed to write and submit technical reports. Fig. 2 compares the number of participants of last year to this year. Fifty-nine teams registered for this year's challenge. Unlike last year, participants were asked to validate the provided dataset, and 19 joined the data validation process, and 15 teams obtained the data after sending the signed user agreement. Finally, nine teams, compared to four in 2019, submitted their TTS system. We synthesized testing audio through the TTS API of each team. Unfortunately, we could not use the TTS API of the two teams due to problems with their TTS system or their server. Table I gives the list of participants that had final submissions to the VLSP TTS shared task 2020. IV. EVALUATION Perceptual testing was chosen for evaluating synthetic voices. First, an intelligibility test was conducted to measure the understandability, then the MOS test, which allowed us to score and compare the global quality of TTS systems with respect to natural speech references. All subjects conducted the online evaluation via a web application. This online evaluation system was built by the School of Information and Communication Technology, Hanoi University of Science and Technology, and Vbee Jsc. This system was integrated into https://tts.vlsp.or.vn.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 618,
                        "end": 624,
                        "text": "Fig. 2",
                        "ref_id": "FIGREF1"
                    },
                    {
                        "start": 1188,
                        "end": 1195,
                        "text": "Table I",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "II. COMMON DATASET",
                "sec_num": null
            },
            {
                "text": "They first registered on the website with necessary information including their hometowns, ages, genders, occupations. They were trained on how to use the website and how to conduct a good test. They were strictly asked to do the test in a controlled listening condition (i.e. headphones and in a quiet distraction-free environment). To ensure that the subjects focused on the test, we designed several sub-tests for each test due to a big number of testing voices (i.e. 8 voices including natural speech). As a result, each sub-test lasts from 25 to 30 minutes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2019",
                "sec_num": "2020"
            },
            {
                "text": "On completion of any sub-test, or after logging in again, a progress page showed listeners how much they had completed. Detailed instructions for each sub-test were only shown on the page with the first part of each sub-test; subsequent parts had briefer instructions in order to achieve a simple layout and a focussed presentation of the task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2019",
                "sec_num": "2020"
            },
            {
                "text": "In order to address the issue of duplicate contents of stimuli, we adopted the Latin square (nxn) [1] for all sub-tests, where n is a number of voices in the sub-test. To be more specific, each subject listened to one n th of the utterances per voice, without any duplicate content. With the Latin square design, the number of subjects should be at least twice more than the ones with the normal design. Stimuli were randomly and separately presented only once to subjects. Each stimulus was an output speech of a TTS system or a natural speech for a sentence. Details of the two tests are described in the following subsections.",
                "cite_spans": [
                    {
                        "start": 98,
                        "end": 101,
                        "text": "[1]",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2019",
                "sec_num": "2020"
            },
            {
                "text": "In the intelligibility test, subjects were asked to write down the text of the audio they heard (Fig. 3) . The subjects might listen again a second time if they do not hear clearly or have long sentences. They only listened to the utterances the third time when the subjects were distracting, or the sentence were very long. Twenty-seven subjects participated in this test. There were two main types of subjects who participated in the test: (i) 19 students (19-22 years-old, 10 females) from Hanoi University of Science and Technology, VNU University of Science; (ii) 8 speech experts (23-38 years-old, 4 female).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 96,
                        "end": 104,
                        "text": "(Fig. 3)",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "A. Intelligibility Test",
                "sec_num": null
            },
            {
                "text": "The testing dataset included 36 sentences. Each subject needs to participate in at least two of the three sub-tests.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A. Intelligibility Test",
                "sec_num": null
            },
            {
                "text": "Subjects (i.e. listeners) were asked to assess by giving scores to the speech they had heard (Fig. 4) . When taking this test, subjects listen to the voice once, unless they do not hear it clearly, then listen for a second time.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 93,
                        "end": 101,
                        "text": "(Fig. 4)",
                        "ref_id": "FIGREF3"
                    }
                ],
                "eq_spans": [],
                "section": "B. MOS Test",
                "sec_num": null
            },
            {
                "text": "Subjects randomly listened to utterances and then gave their scores for the naturalness of the utterances. The question presented to subjects was \"How do you rate the naturalness of the sound you have just heard?\". Subjects could choose one of the following five options (5-scale):",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "B. MOS Test",
                "sec_num": null
            },
            {
                "text": "-5: Excellent, very natural (human speech) -4: Good, natural -3: Fair, rather natural -2: Poor, rather unnatural (rather robotic) -1: Bad, very unnatural (robotic). Testing text set includes 60 sentences. There are two subtests, including 60 random utterances each (taken from 480 utterances). Table III illustrates the design for the two MOS sub-tests. We put the natural speech (NATURAL) as a reference in both sub-tests. Due to an odd number of final participated teams, sub-test 1 included 3 teams (Team 2,4,5) while sub-test 2 had voices from the remaining 4 teams (Team 6,7,8,9). Subjects participated in two sub-tests for voices built from the common dataset. Due to a rather big number of voices in each sub-test (i.e. 5 including the natural reference), we let the subjects to heard randomly half of the utterances for each voice. The number of subjects who listened to each sub-test was 48 (20 females). Each subject needs to participate in all two sub-tests, estimated at 25 to 30 minutes.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 294,
                        "end": 303,
                        "text": "Table III",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "B. MOS Test",
                "sec_num": null
            },
            {
                "text": "Due to a large number of loanwords in the test set, the intelligibility results were not good, at about 68-89% at both word and syllable levels, even with natural speech. The subjects might do not know how to write these loanwords or present different orthography from the original text. We should have a special design and more analyses for this type of test in the future.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A. Intelligibility Score",
                "sec_num": null
            },
            {
                "text": "The perceptual evaluation of the general naturalness was carried out on different voices of participants and a natural speech reference (NATURAL) of the same speaker as the training corpus. Fig. 5 and Table IV show the final MOS test results. Only three teams submitted technical reports, i.e. Team2, Team6, and Team7.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 190,
                        "end": 196,
                        "text": "Fig. 5",
                        "ref_id": "FIGREF4"
                    },
                    {
                        "start": 201,
                        "end": 209,
                        "text": "Table IV",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "B. MOS Score",
                "sec_num": null
            },
            {
                "text": "We can see that Team2 was the best team (i.e. 3.769)about 89.3% compared to the natural speech (i.e. 4.220/5). This team adopted Tacotron-2 as the acoustic model, and HiFi-GAN as a real-time vocoder, and Waveglow as a denoiser. Team7 was the second place with a 3.698 score (only less than the first place 0.07 point). This team used FastSpeech and PostNet, which could be considered as a faster acoustic model, compared to Tacotron-2 or only FastSpeech. Team6 was the fifth place with a 3.313 score. Their acoustic model was Tacotron2, and their vocoder was Waveglow. Although using state-of-the-art synthesis techniques that lead to a high-quality synthetic voice, there were still some remaining problems in the results of participants. Some reasons were found for a quite-big gap between the best synthetic voice with state-of-the-art synthetic techniques and the human voice: (i) improper prosodic phrasing for long sentences and (ii) wrong/bad pronunciation for loan words.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "B. MOS Score",
                "sec_num": null
            },
            {
                "text": "Several two-factorial ANOVAs were run on the MOS results, illustrated in Table V . The two factors were the TTS system (8 levels) and the Sentence (60 levels) or the Subject (48 levels). All factors and their interactions in both ANOVAs had significant effect (p<0.0001).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 73,
                        "end": 80,
                        "text": "Table V",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "C. Analysis and Discussion",
                "sec_num": null
            },
            {
                "text": "The TTS system factor alone explained an important part of the variance over levels of both Sentence (29%) and Subject factors (30%). The Sentence factor explained only about 8% of the variance (partial 2 = 0.08) while the Subject did 19% (partial 2 = 0.19). The interaction between the System and Sentence or Subject explained a quite important part of the variance, i.e. 21% and 14% respectively. We did observe the sentences with bad scores and found that they were long sentences or had a number of loanwords. Synthetic utterances having consecutive loanwords are extremely bad intelligible. These problems led to bad scores for both Intelligibility and MOS Test.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "C. Analysis and Discussion",
                "sec_num": null
            },
            {
                "text": "We did some valuable experiments on TTS systems from different participants using a common dataset in the TTS shared task in the VLSP Campaign 2020. Participants had to validate a piece of training data before receiving the common dataset. There are 7,770 utterances of a female Southwest professional speaker (about 9.5 hours) in the released training dataset. Although using state-of-the-art synthesis techniques that lead to a high-quality synthetic voice, there were still some remaining problems in the results of participants. The best synthetic voice with Tacotron 2 and Hifigan vocoder with Waveglow denoiser achieved 89.3% compared to the human voice, i.e. 3.77 over 4.22 point on a 5-point MOS scale). Some reasons were found for a quite-big gap between the best synthetic voice with state-of-the-art synthetic techniques and the human voice: (i) improper prosodic phrasing for long sentences and (ii) wrong/bad pronunciation for loan words. For the next speech synthesis task of the VLSP Campaign in 2021, we may have more advanced topics for Vietnamese speech synthesis, such as speaker adaptation or expressive speech synthesis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "VI. CONCLUSIONS",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The VLSP 2020 TTS shared task was mainly supported by the R&D Lab, Vbee Services and Data Processing Solution Jsc, and School of Information and Communication Technology. They supported this shared task in developing, deploying, and conducting the online evaluation, based on perception tests as well as building the dataset for the challenge. This task was funded by the Vingroup Innovation Foundation (VINIF) under the project code DA116_14062019 / year 2019. We would like to thank Vais Jsc. for their ASR in building the dataset, and last but not least, the subjects who gave time and effort for the experiments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "ACKNOWLEDGMENT",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Experimental Designs",
                "authors": [
                    {
                        "first": "William",
                        "middle": [
                            "G"
                        ],
                        "last": "Cochran",
                        "suffix": ""
                    },
                    {
                        "first": "Gertrude",
                        "middle": [
                            "M"
                        ],
                        "last": "Cox",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cochran William G. and Cox Gertrude M. \"Experimental Designs, 2nd Edition\". Wiley, 2 edition, April 1992. ISBN 0471545678.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Special Issue in VLSP",
                "authors": [
                    {
                        "first": "Mai",
                        "middle": [],
                        "last": "Luong Chi",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Journal of Computer Science and Cybernetics",
                "volume": "",
                "issue": "4",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Luong Chi Mai. \"Special Issue in VLSP 2018\". Journal of Computer Science and Cybernetics, V.34, N.4 (2018).",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "2018 IEEE International Conference on Acoustics, Speech and Signal Processing",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Shen",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Pang",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [
                            "J"
                        ],
                        "last": "Weiss",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Shen, J., Pang, R., Weiss, R.J., et al. 2017. \"Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions\". 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2018.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Statistical Parametric Speech Synthesis using Deep Neural Networks\" on 2013 IEEE International Conference on Acoustics, Speech and Signal Processing",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Ze",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Senior",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Schuster",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "7962--7966",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. Ze, A. Senior, and M. Schuster, \"Statistical Parametric Speech Synthesis using Deep Neural Networks\" on 2013 IEEE International Conference on Acoustics, Speech and Signal Processing 2013, pp. 7962-7966. IEEE.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Prosodic Phrasing Modeling for Vietnamese TTS using Syntactic Information",
                "authors": [
                    {
                        "first": "Thu",
                        "middle": [],
                        "last": "Nguyen Thi",
                        "suffix": ""
                    },
                    {
                        "first": "Albert",
                        "middle": [],
                        "last": "Trang",
                        "suffix": ""
                    },
                    {
                        "first": "Tran",
                        "middle": [
                            "Do"
                        ],
                        "last": "Rilliard",
                        "suffix": ""
                    },
                    {
                        "first": "Christophe",
                        "middle": [],
                        "last": "Dat",
                        "suffix": ""
                    }
                ],
                "year": 2014,
                "venue": "15th Annual Conference of the International Speech Communication Association",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nguyen Thi Thu Trang, Albert Rilliard, Tran Do Dat, and Christophe d'Alessandro, \"Prosodic Phrasing Modeling for Vietnamese TTS using Syntactic Information\" in 15th Annual Conference of the International Speech Communication Association. Singapore. 2014.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "HMM-based TTS for Hanoi Vietnamese: Issues in Design and Evaluation",
                "authors": [
                    {
                        "first": "Thu",
                        "middle": [],
                        "last": "Nguyen Thi",
                        "suffix": ""
                    },
                    {
                        "first": "Alessandro",
                        "middle": [],
                        "last": "Trang",
                        "suffix": ""
                    },
                    {
                        "first": "Rilliard",
                        "middle": [],
                        "last": "Christophe",
                        "suffix": ""
                    },
                    {
                        "first": "Tran Do",
                        "middle": [],
                        "last": "Albert",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Dat",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "14th Annual Conference of the International Speech Communication Association",
                "volume": "",
                "issue": "",
                "pages": "2311--2315",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nguyen Thi Thu Trang, Alessandro Christophe, Rilliard Albert, and Tran Do Dat. \"HMM-based TTS for Hanoi Vietnamese: Issues in Design and Evaluation\" in 14th Annual Conference of the International Speech Communication Association (Interspeech 2013), pages 2311- 2315. Lyon, France, August 2013b. ISCA.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "A method for Vietnamese Text Normalization to Improve the Quality of Speech Synthesis",
                "authors": [
                    {
                        "first": "Thu",
                        "middle": [],
                        "last": "Nguyen Thi",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Trang",
                        "suffix": ""
                    },
                    {
                        "first": "Thi",
                        "middle": [],
                        "last": "Pham",
                        "suffix": ""
                    },
                    {
                        "first": "Tran Do",
                        "middle": [],
                        "last": "Thanh",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Dat",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the 2010 Symposium on Information and Communication",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nguyen Thi Thu Trang, Pham Thi Thanh, and Tran Do Dat. \"A method for Vietnamese Text Normalization to Improve the Quality of Speech Synthesis\" in Proceedings of the 2010 Symposium on Information and Communication (SoICT 2010), Hanoi, Vietnam. 2010.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "A Hybrid Method for Vietnamese Text Normalization",
                "authors": [
                    {
                        "first": "Thu",
                        "middle": [],
                        "last": "Nguyen Thi",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Trang",
                        "suffix": ""
                    },
                    {
                        "first": "Xuan",
                        "middle": [],
                        "last": "Dang",
                        "suffix": ""
                    },
                    {
                        "first": "Nguyen Xuan",
                        "middle": [],
                        "last": "Bach",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Tung",
                        "suffix": ""
                    }
                ],
                "year": 2019,
                "venue": "Proceedings of the 2019 3rd International Conference on Natural Language Processing and Information Retrieval",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nguyen Thi Thu Trang, Dang Xuan Bach, and Nguyen Xuan Tung. \"A Hybrid Method for Vietnamese Text Normalization\" in Proceedings of the 2019 3rd International Conference on Natural Language Processing and Information Retrieval (NLPIR 2019). Japan. 2019.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Development of Vietnamese Speech Synthesis System using Deep Neural Networks",
                "authors": [
                    {
                        "first": "Nguyen Quoc",
                        "middle": [],
                        "last": "Nguyen Van Thinh",
                        "suffix": ""
                    },
                    {
                        "first": "Phan",
                        "middle": [],
                        "last": "Bao",
                        "suffix": ""
                    },
                    {
                        "first": "Do",
                        "middle": [],
                        "last": "Huy Kinh",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Van Hai",
                        "suffix": ""
                    }
                ],
                "year": 2018,
                "venue": "Journal of Computer Science and Cybernetics",
                "volume": "",
                "issue": "4",
                "pages": "349--363",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nguyen Van Thinh, Nguyen Quoc Bao, Phan Huy Kinh, Do Van Hai, Development of Vietnamese Speech Synthesis System using Deep Neural Networks, Journal of Computer Science and Cybernetics, V.34, N.4 (2018), 349-363.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "An HMM-based Vietnamese Speech Synthesis System",
                "authors": [
                    {
                        "first": "Luong",
                        "middle": [],
                        "last": "Vu Thang Tat",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Mai Chi",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Nakamura",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of the Oriental COCOSDA International Conference on Speech Database and Assessments",
                "volume": "",
                "issue": "",
                "pages": "116--121",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vu Thang Tat, Luong Mai Chi, and Nakamura S. \"An HMM-based Vietnamese Speech Synthesis System\" in Proceedings of the Oriental COCOSDA International Conference on Speech Database and Assessments, pages 116-121, Beijing, China, 2009.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Tacotron:Towards End-to-end Speech Synthesis",
                "authors": [
                    {
                        "first": "Yuxuan",
                        "middle": [],
                        "last": "Wang",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [
                            "J"
                        ],
                        "last": "Skerry-Ryan",
                        "suffix": ""
                    },
                    {
                        "first": "Daisy",
                        "middle": [],
                        "last": "Stanton",
                        "suffix": ""
                    },
                    {
                        "first": "Yonghui",
                        "middle": [],
                        "last": "Wu",
                        "suffix": ""
                    },
                    {
                        "first": "Ron",
                        "middle": [
                            "J"
                        ],
                        "last": "Weiss",
                        "suffix": ""
                    },
                    {
                        "first": "Navdeep",
                        "middle": [],
                        "last": "Jaitly",
                        "suffix": ""
                    },
                    {
                        "first": "Zongheng",
                        "middle": [],
                        "last": "Yang",
                        "suffix": ""
                    },
                    {
                        "first": "Ying",
                        "middle": [],
                        "last": "Xiao",
                        "suffix": ""
                    },
                    {
                        "first": "Zhifeng",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "Samy",
                        "middle": [],
                        "last": "Bengio",
                        "suffix": ""
                    }
                ],
                "year": 2017,
                "venue": "18th Annual Conference of the International Speech Communication Association",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Yuxuan Wang, RJ Skerry-Ryan, Daisy Stanton, YonghuiWu, Ron J Weiss, Navdeep Jaitly, Zongheng Yang, YingXiao, Zhifeng Chen, Samy Bengio, et al. \"Tacotron:Towards End-to-end Speech Synthesis\" in 18th Annual Conference of the International Speech Communication Association. Sweden. 2017.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "text": "A complete process for participating TTS shared task VLSP 2020.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF1": {
                "text": "Participants in VLSP TTS 2020 and 2019.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF2": {
                "text": "Online Tool for Intelligibility Test.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF3": {
                "text": "Online Tool for MOS Test.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "FIGREF4": {
                "text": "MOS Test Final Results.",
                "uris": null,
                "type_str": "figure",
                "num": null
            },
            "TABREF0": {
                "content": "<table><tr><td>No</td><td>Team ID</td><td>Affiliation</td><td>Submission</td></tr><tr><td>1</td><td>Team1</td><td>Unaffiliated</td><td>API (error)</td></tr><tr><td>2</td><td>Team2</td><td>Smartcall</td><td>API</td></tr><tr><td>3</td><td>Team3</td><td>Unaffiliated</td><td>Docker image (error)</td></tr><tr><td>4</td><td>Team4</td><td>Viettel Telecom</td><td>API</td></tr><tr><td>5</td><td>Team5</td><td>IC</td><td>IC</td></tr><tr><td>6</td><td>Team6</td><td>VAIS</td><td>API</td></tr><tr><td>7</td><td>Team7</td><td>Falcon</td><td>API</td></tr><tr><td>8</td><td>Team8</td><td>Sun Asterisk Inc.</td><td>Docker Image</td></tr><tr><td>9</td><td>Team9</td><td>UET</td><td>Docker Image</td></tr></table>",
                "num": null,
                "text": "LIST OF TEAMS PARTICIPATING IN VSLP TTS 2020",
                "type_str": "table",
                "html": null
            },
            "TABREF2": {
                "content": "<table><tr><td>.</td><td colspan=\"2\">DESIGN FOR INTELLIGIBILITY SUB-TESTS</td></tr><tr><td>Sub-test 1</td><td>Sub-test 2</td><td>Sub-test 3</td></tr><tr><td>IntelligibilityTest-1</td><td>IntelligibilityTest-2</td><td>IntelligibilityTest-3</td></tr><tr><td>Team 7</td><td>NATURAL</td><td>NATURAL</td></tr><tr><td>Team 8</td><td>Team 5</td><td>Team 2</td></tr><tr><td>Team 9</td><td>Team 6</td><td>Team 4</td></tr><tr><td colspan=\"3\">There are three sub-tests in the intelligibility test,</td></tr><tr><td colspan=\"3\">following the Latin Square design aforementioned. In each</td></tr><tr><td colspan=\"3\">sub-test, there were 3 voices of 3 different teams with or</td></tr><tr><td colspan=\"3\">without the natural speech reference (NATURAL). Details</td></tr><tr><td colspan=\"3\">for each sub-test is presented in Table II. Each sub-test</td></tr><tr><td colspan=\"3\">included voices of two (sub-test 2 and sub-test 3) or three</td></tr><tr><td colspan=\"3\">teams (sub-test 1). The natural speech was put in both sub-</td></tr><tr><td colspan=\"3\">test 2 and sub-test 3 for more reference. As a result, each sub-</td></tr><tr><td colspan=\"2\">test had a total of 3 voices.</td><td/></tr></table>",
                "num": null,
                "text": "",
                "type_str": "table",
                "html": null
            },
            "TABREF3": {
                "content": "<table><tr><td>Sub-test 1</td><td>Sub-test 2</td></tr><tr><td>MOS Test 1</td><td>MOS Test 2</td></tr><tr><td>NATURAL</td><td>NATURAL</td></tr><tr><td>Team 2</td><td>Team 6</td></tr><tr><td>Team 4</td><td>Team 7</td></tr><tr><td>Team 5</td><td>Team 8</td></tr><tr><td/><td>Team 9</td></tr></table>",
                "num": null,
                "text": "DESIGN FOR MOS TEST SUB-TESTS",
                "type_str": "table",
                "html": null
            },
            "TABREF4": {
                "content": "<table><tr><td>Testing voice</td><td>MOS Score (5-scale)</td><td>Synthesis Techniques</td></tr><tr><td>NATURAL</td><td>4.220</td><td/></tr><tr><td/><td/><td>\u2022 Acoustic model: Tacotron 2;</td></tr><tr><td>Team2</td><td>3.769</td><td>Vocoder: HiFi-GAN;</td></tr><tr><td/><td/><td>\u2022 Denoiser: Waveglow</td></tr><tr><td/><td/><td>\u2022 Acoustic model: FastSpeech</td></tr><tr><td>Team7</td><td>3.698</td><td>+ PostNet;</td></tr><tr><td/><td/><td>\u2022 Vocoder: Waveglow</td></tr><tr><td>Team6</td><td>3.313</td><td>\u2022 Acoustic model: Tacotron 2; \u2022 Vocoder: Waveglow</td></tr></table>",
                "num": null,
                "text": "MOS TEST RESULTS WITH SYNTHESIS TECHNIQUES",
                "type_str": "table",
                "html": null
            },
            "TABREF5": {
                "content": "<table><tr><td>Factor</td><td>df</td><td>df error</td><td>F</td><td>p</td><td/></tr><tr><td>System</td><td>7</td><td>5,688</td><td>335.38</td><td>0.0000</td><td>0.29</td></tr><tr><td>Sentence</td><td>59</td><td>5,688</td><td>8.71</td><td>0.0000</td><td>0.08</td></tr><tr><td>System:Sentence</td><td>412</td><td>5,688</td><td>3.57</td><td>0.0000</td><td>0.21</td></tr><tr><td>System</td><td>7</td><td>5,798</td><td>353.37</td><td>0.0000</td><td>0.30</td></tr><tr><td>Subject</td><td>47</td><td>5,798</td><td>29.29</td><td>0.0000</td><td>0.19</td></tr><tr><td>System:Subject</td><td>314</td><td>5,798</td><td>2.89</td><td>0.0000</td><td>0.14</td></tr></table>",
                "num": null,
                "text": "ANOVA RESULTS OF MOS TEST",
                "type_str": "table",
                "html": null
            }
        }
    }
}