from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Init: to update with your specific keys
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("tts_vocoder1", "MCD", "MCD Eval")
    task1 = Task("tts_vocoder2", "Log F0 RMSE", "Log F0 RMSE Eval")
    task2 = Task("tts_vocoder3", "UTMOS", "UTMOS Eval")
    task3 = Task("tts_vocoder4", "Bitrate", "Bitrate")
    task4 = Task("tts_vocoder5", "Sample Rate", "Sample Rate")
    task6 = Task("tts_vocoder7", "LowSR-Rank", "LowSR-Rank")
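

# Illustrative sketch only (not part of the upstream leaderboard code): one way
# the Tasks enum above is typically consumed, assuming a per-system result JSON
# shaped like {"tts_vocoder1": {"MCD": 4.2}, ...}. The function name and the
# JSON layout are assumptions for illustration.
def _example_row_from_results(results: dict) -> dict:
    """Map one system's raw result dict onto leaderboard column names via Tasks."""
    return {
        task.value.col_name: results.get(task.value.benchmark, {}).get(task.value.metric)
        for task in Tasks
    }
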
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">Discrete-based Vocoder (LowSR) Leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
The leaderboard for the Discrete Speech Unit Challenge (TTS Challenge - Vocoder Track - LowSR) at Interspeech 2024. Challenge details can be found at https://www.wavlab.org/activities/2024/Interspeech2024-Discrete-Speech-Unit-Challenge/
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## How it works

The evaluation (static version) is conducted by the organizers only.
We will accept submissions via the Google form (see the rules on the challenge website).

## Reproducibility

To reproduce our results, please refer to the evaluation scripts at https://github.com/espnet/espnet/tree/master/egs2/TEMPLATE/tts1#evaluation
"""
EVALUATION_QUEUE_TEXT = """
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results and the challenge (citation coming soon; for now, please cite the challenge website)"
CITATION_BUTTON_TEXT = r"""
"""