# prosalign / app.py
# (Hugging Face Spaces source; viewer chrome removed — commit 47eb520, "Update app.py", 6.41 kB)
import gradio as gr
import subprocess,os
from datasets import load_dataset, Audio
import datas,ctcalign,graph
from numpy import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def setup():
    """Download and build Google's REAPER pitch tracker into ./REAPER.

    Side effects: fetches master.zip from GitHub, unpacks it to ./REAPER,
    and runs cmake + make in ./REAPER/build. Runs once at import time.

    Unlike the original, this never calls os.chdir: every build step uses
    subprocess's cwd= argument, so a failure partway through cannot strand
    the whole process in a subdirectory. Failed steps are reported with
    their stderr instead of being silently ignored.
    """
    def _run(cmd, cwd=None):
        # Capture output so build logs show up in the Space's container logs.
        r = subprocess.run(cmd, capture_output=True, text=True, cwd=cwd)
        if r.stdout:
            print(r.stdout)
        if r.returncode != 0:
            # Surface the failure; later steps may still be attempted,
            # matching the original best-effort behavior.
            print(f"Step {cmd} failed (exit {r.returncode}):", r.stderr)
        return r

    print('PWD::', subprocess.run(["pwd"], capture_output=True, text=True).stdout)
    _run(["wget", "https://github.com/google/REAPER/archive/refs/heads/master.zip"])
    _run(["unzip", "./master.zip"])
    _run(["mv", "REAPER-master", "REAPER"])
    _run(["rm", "./master.zip"])
    # exist_ok lets a restarted Space reuse a half-finished checkout.
    os.makedirs('./REAPER/build', exist_ok=True)
    _run(["cmake", ".."], cwd='./REAPER/build')
    _run(["make"], cwd='./REAPER/build')
    print('LS::', _run(["ls", "-la"]).stdout or '')
#print('about to setup')
# Build REAPER at import time so the binary exists before the UI is served.
setup()
# display some of the selected corpus
# (because gradio pagination is currently broken)
# and reset all filter menus
def pick_lang(langname):
    """Load the corpus for *langname* and return a preview for the UI.

    Called when the language dropdown changes.

    Args:
        langname: "Icelandic" or "Faroese" (the dropdown's choices).

    Returns:
        Tuple of (first 15 rows as a pandas DataFrame, "all"); the second
        element resets the gender-filter dropdown back to "all".

    Raises:
        ValueError: for any other language name. The original fell through
        to a NameError on an unbound variable instead.
    """
    if langname == "Icelandic":
        ds = datas.ds_i
    elif langname == "Faroese":
        ds = datas.ds_f
    else:
        raise ValueError(f"Unknown language: {langname!r}")
    df = ds.data.to_pandas()
    # Drop columns not worth showing in the browser (raw audio bytes etc.).
    df = df.drop(columns=['audio', 'speaker_id', 'duration'])
    # Only 15 rows: gradio pagination is currently broken (see file comment).
    return (df[:15], "all")
def f1(langname, gender):
    """Pick a random recording (optionally filtered by gender) and graph it.

    Args:
        langname: "Icelandic" or "Faroese".
        gender: "all", or a prefix such as "f"/"m" matched against the
            dataset's gender field.

    Returns:
        Tuple of (matplotlib figure from graph.align_and_graph, path to the
        audio file, human-readable recording info string).

    Raises:
        ValueError: for an unknown language name.
    """
    if langname == "Icelandic":
        ds = datas.ds_i
        lang_aligner = datas.a_i
    elif langname == "Faroese":
        ds = datas.ds_f
        lang_aligner = datas.a_f
    else:
        raise ValueError(f"Unknown language: {langname!r}")
    if gender != "all":
        ds = ds.filter(lambda x: x["gender"].startswith(gender))
    maxdat = len(ds)
    # numpy's random.randint upper bound is EXCLUSIVE: randint(maxdat)
    # yields 0..maxdat-1. The original randint(maxdat-1) could never pick
    # the last record and crashed (low >= high) when exactly one record
    # matched the filter.
    ds = ds.select([random.randint(maxdat)])
    sound_path = ds['audio'][0]['path']  # ds['audio'][0]['array'] would be the samples
    transcript = ds['normalized_text'][0]
    rec_info = f"{ds['audio_id'][0]}, {ds['gender'][0]}, {ds['age'][0]}"
    if langname == "Faroese":
        # Only the Faroese corpus carries a dialect field.
        rec_info += f", {ds['dialect'][0]}"
    return (graph.align_and_graph(sound_path, transcript, lang_aligner), sound_path, rec_info)
# --- Gradio UI definition (built at import time; launched at the bottom). ---
# NOTE(review): btn1.style(...) and DataFrame(overflow_row_behaviour=...) are
# old-gradio APIs removed in gradio 4.x — this file presumably pins an older
# gradio version; confirm before upgrading.
bl = gr.Blocks()
with bl:
    gr.Markdown(
    """
    # Demo under construction
    ### 1. Choose a language to load
    ### 2. See a small sample of the selected corpus
    ### 3. Click the button below to view time-aligned prosody information for a random example
    """ )
    with gr.Row():
        langmenu = gr.Dropdown(["Faroese", "Icelandic"], label="Language")#, info="Loading the dataset takes some time"
        gr.Markdown(
        """
        Pitch is shown in dark blue and loudness is the light orange line. The pitch estimation, and the time-alignment of words to audio, are completely automated and there will be some inaccuracy.
        The random example can be from the whole corpus, not necessarily one of the visible rows. More information below.
        """ )
    # Unused placeholder state; the commented-out value shows the originally
    # intended per-session aligner object.
    align_func = gr.State()#value=ctcalign.aligner(model_path="carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h",model_word_separator = '|',model_blank_token = '[PAD]')
    with gr.Row():
        # Filter menus; pick_lang resets gmenu to "all" on language change.
        # NOTE(review): amenu (age) is defined but never wired to any handler.
        gmenu = gr.Dropdown(["all", "f", "m"], label="Gender", value="all")
        amenu = gr.Dropdown(["all", '15-35', '36-60', '61+'], label="Age", value="all")
    with gr.Row():
        #invisidata = gr.DataFrame(interactive=False, visible=False)
        databrowser = gr.DataFrame(wrap=True, max_rows=50, interactive=False, overflow_row_behaviour='paginate')
    with gr.Row():
        with gr.Column(scale=1):
            btn1 = gr.Button(value="CLICK HERE")
            btn1.style(size="lg",full_width=True)
        with gr.Column(scale=4):
            audio1 = gr.Audio(interactive=False)
            ainfo = gr.Markdown("""
            Audio file info
            """)
    pl1 = gr.Plot()
    # when user selects a language,
    # display some data
    # and reset all filter menus
    langmenu.change(pick_lang,langmenu,[databrowser,gmenu])
    #
    # Button click: draw a random example from the chosen (filtered) corpus
    # and show its prosody plot, audio player, and recording metadata.
    btn1.click(f1, [langmenu,gmenu], [pl1,audio1,ainfo])
    gr.Markdown(
    """
    # ABOUT
    The Icelandic corpus is [samromur-asr](https://huggingface.co/datasets/language-and-voice-lab/samromur_asr), and Faroese uses [ravnursson-asr](https://huggingface.co/datasets/carlosdanielhernandezmena/ravnursson_asr).
    ### Forced alignment
    The prosody graphs are marked with time-alignments for the words found by [CTC decoding](https://pytorch.org/audio/main/tutorials/forced_alignment_tutorial.html). This uses wav2vec-2.0 based models ([Faroese](https://huggingface.co/carlosdanielhernandezmena/wav2vec2-large-xlsr-53-faroese-100h), [Icelandic](https://huggingface.co/carlosdanielhernandezmena/wav2vec2-large-xlsr-53-icelandic-ep10-1000h)) and tends to be more robust than Montreal Forced Aligner.
    However, this aligner does not contain any phoneme representation, and therefore, segment alignments are for orthographic characters rather than phonemes. Especially in languages with shallow orthography, these letter alignments probably indicate something about the timing of sounds in a word, but the exact durations should not be taken too seriously especially in cases like doubled or silent letters.
    ### Pitch tracking (F0 estimation)
    Estimated pitch is shown in blue on the graphs, as tracked by [REAPER](https://github.com/google/REAPER).
    ### Intensity
    The orange line is root mean squared energy, which reflects loudness and is also a good indication of syllable placement, as it should correspond to vowels and similar sounds.
    This is a work-in-progress basic demo for automatic prosodic annotation in Faroese and Icelandic.
    So far, you cannot select or upload your own choice of sentence for analysis, nor search the corpora. Also, it does not display well when the sentence is too long. In that case, or if there are serious errors in the automated analyses, try another random sentence.
    Contact caitlinr@ru.is / https://github.com/catiR/ when things break, or with ideas/suggestions about how to apply this. Unfortunately I am not a web/interface designer so this is not going to look nice or be user friendly, I only do speech processing.
    The source code is available under the Files tab at the top of the Space.
    """
    )
if __name__ == "__main__":
    # Start the Gradio server when run as a script (HF Spaces runs app.py).
    bl.launch()