from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Init: update with your task-specific keys
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("tts_vocoder1", "MCD", "MCD Eval")  # mel-cepstral distortion (lower is better)
    task1 = Task("tts_vocoder2", "Log F0 RMSE", "Log F0 RMSE Eval")  # RMSE of log fundamental frequency (lower is better)
    task2 = Task("tts_vocoder3", "UTMOS", "UTMOS Eval")  # UTMOS predicted naturalness MOS (higher is better)
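# A minimal sketch (assumed usage, not part of this file's template) of how the
# Tasks entries above are typically consumed when building one leaderboard row:
# each result JSON is assumed to be keyed by `benchmark`, with the score stored
# under `metric` and displayed in the column named `col_name`. `build_row` is a
# hypothetical helper for illustration only.
import json


def build_row(result_path: str) -> dict:
    """Map each task's display column to the score found in a result JSON."""
    with open(result_path) as f:
        results = json.load(f)
    return {
        task.value.col_name: results[task.value.benchmark][task.value.metric]
        for task in Tasks
    }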
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">Discrete-based Vocoder Leaderboard</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
The leaderboard for the Discrete Speech Unit Challenge (TTS Challenge - Vocoder Track) at Interspeech 2024. Challenge details can be found at https://www.wavlab.org/activities/2024/Interspeech2024-Discrete-Speech-Unit-Challenge/
"""
# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
## How it works
The evaluation (static version) are conducted by the organizers only.
We will accept submissions from the google form (see rules in the challenge website).
## Reproducibility
To reproduce our results, please refer to the evaluation scripts at https://github.com/espnet/espnet/tree/master/egs2/TEMPLATE/tts1#evaluation
"""
EVALUATION_QUEUE_TEXT = """
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results and the challenge (citation coming soon; for now, please cite the challenge website)"
CITATION_BUTTON_TEXT = r"""
"""