catiR committed
Commit 0d67145 · Parent(s): a894787

run clustering

Files changed (3):
  1. app.py +3 -3
  2. scripts/clusterprosody.py +102 -7
  3. scripts/runSQ.py +3 -3
app.py CHANGED
@@ -33,9 +33,9 @@ setup()
 
 def f1(voices, sent, indices):
     #tts_audio, tts_score, graph = scripts.runSQ.run(sent, voices, indices)
-    tts_audio, tts_score, graph = scripts.runSQ.run(sent, [voices], indices)
+    tts_audio, tts_score, tts_fig_p, mid_fig_p, bad_fig_p, tts_fig_e, fig_mid_e, fig_bad_e = scripts.runSQ.run(sent, [voices], indices)
     score_report = f'Difference from TTS to real speech: {round(tts_score,2)}'
-    return (tts_audio, score_report, tts_graph, mid_graph, bad_graph)
+    return (tts_audio, score_report, tts_fig_p, mid_fig_p, bad_fig_p)
 
 
 def label_indices(sentence):
@@ -46,7 +46,7 @@ def label_indices(sentence):
 
 
 
-temp_sentences = scripts.runSQ.snorm.create_temp_sent_list()
+temp_sentences = scripts.runSQ.create_temp_sent_list()
 
 bl = gr.Blocks()
 with bl:
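A note on the callback contract this change creates: scripts.runSQ.run() now returns eight values (audio, score, three pitch figures, three energy figures), but f1 forwards only the five pitch-related outputs, so the energy figures are unpacked and then dropped at the UI layer. Whenever they are wired up, the Gradio output component list must match the callback's return arity one-to-one. A minimal sketch of that constraint, with hypothetical component names that are not taken from this commit:

# hypothetical Gradio wiring; component names are illustrative only.
# five returned values map positionally to five output components
btn.click(
    f1,
    inputs=[voice_choice, sentence_box, index_box],
    outputs=[audio_out, score_text, fig_best_p, fig_mid_p, fig_bad_p],
)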
scripts/clusterprosody.py CHANGED
@@ -302,16 +302,21 @@ def match_tts(clusters, speech_data, tts_data, tts_align, words, seg_aligns, voice):
 
     # now do graphs of matched_data with tts_data
     # and report best_cluster_score
-    tts_fig = plot_pitch_tts(matched_data,tts_data, tts_align, words,seg_aligns,best_cluster,voice)
 
     mid_cluster = tts_info[1][0]
     mid_data = {f'{words}**{r}': speech_data[f'{words}**{r}'] for r,c in clusters if c==mid_cluster}
     bad_cluster = tts_info[2][0]
     bad_data = {f'{words}**{r}': speech_data[f'{words}**{r}'] for r,c in clusters if c==bad_cluster}
-    fig_mid = plot_pitch_cluster(mid_data,words,seg_aligns,mid_cluster)
-    fig_bad = plot_pitch_cluster(bad_data,words,seg_aligns,bad_cluster)
+
+    tts_fig_p = plot_pitch_tts(matched_data,tts_data, tts_align, words,seg_aligns,best_cluster,voice)
+    fig_mid_p = plot_pitch_cluster(mid_data,words,seg_aligns,mid_cluster)
+    fig_bad_p = plot_pitch_cluster(bad_data,words,seg_aligns,bad_cluster)
+
+    tts_fig_e = plot_rmse_tts(matched_data,tts_data, tts_align, words,seg_aligns,best_cluster,voice)
+    fig_mid_e = plot_rmse_cluster(mid_data,words,seg_aligns,mid_cluster)
+    fig_bad_e = plot_rmse_cluster(bad_data,words,seg_aligns,bad_cluster)
 
-    return best_cluster_score, tts_fig, fig_mid, fig_bad
+    return best_cluster_score, tts_fig_p, fig_mid_p, fig_bad_p, tts_fig_e, fig_mid_e, fig_bad_e
 
 
 
@@ -353,11 +358,11 @@ def cluster(norm_sent,orig_sent,h_spk_ids, h_align_dir, h_f0_dir, h_wav_dir, tts_dir, voices, start_end_word_index):
     tts_data, tts_align = get_tts_data(tdir,v,start_end_word_index)
 
     # match the data with a cluster -----
-    best_cluster_score, tts_fig, fig_mid, fig_bad = match_tts(groups, data, tts_data, tts_align, words, seg_aligns,v)
+    best_cluster_score, tts_fig_p, fig_mid_p, fig_bad_p, tts_fig_e, fig_mid_e, fig_bad_e = match_tts(groups, data, tts_data, tts_align, words, seg_aligns,v)
 
     # only supports one voice at a time currently
-    return best_cluster_score, tts_fig, fig_mid, fig_bad
-    #return words, kmedoids_cluster_dists, groups
+    return best_cluster_score, tts_fig_p, fig_mid_p, fig_bad_p, tts_fig_e, fig_mid_e, fig_bad_e
+    #return words, kmedoids_cluster_dists, group
 
 
 
@@ -526,6 +531,96 @@ def plot_pitch_cluster(speech_data,words,seg_aligns,cluster_id):
 
 
 
+def plot_rmse_tts(speech_data,tts_data, tts_align,words,seg_aligns,cluster_id, voice):
+    colors = ["red", "green", "blue", "orange", "purple", "pink", "brown", "gray", "cyan"]
+    cc = 0
+    fig = plt.figure(figsize=(10, 5))
+    plt.title(f"{words} - Energy - Cluster {cluster_id}")
+    for k,v in speech_data.items():
+
+        spk = k.split('**')[1]
+
+        word_times = seg_aligns[k]
+
+        rmse = [e for p,e in v]
+        # datapoint interval is 0.005 seconds
+        rmse_xvals = [x*0.005 for x in range(len(rmse))]
+
+        # centre around the first word boundary -
+        # if 3+ words, too bad.
+        if len(word_times)>1:
+            realign = np.mean([word_times[0][2],word_times[1][1]])
+            rmse_xvals = [x - realign for x in rmse_xvals]
+            word_times = [(w,s-realign,e-realign) for w,s,e in word_times]
+            plt.axvline(x=0, color="gray", linestyle='--', linewidth=1, label=f"{word_times[0][0]} -> {word_times[1][0]} boundary")
+
+        if len(word_times)>2:
+            for i in range(1,len(word_times)-1):
+                bound_line = np.mean([word_times[i][2],word_times[i+1][1]])
+                plt.axvline(x=bound_line, color=colors[cc], linestyle='--', linewidth=1, label=f"Speaker {spk} -> {word_times[i+1][0]}")
+
+        plt.scatter(rmse_xvals, rmse, color=colors[cc], label=f"Speaker {spk}")
+        cc += 1
+        if cc >= len(colors):
+            cc=0
+
+    trmse = [e for p,e in tts_data]
+    t_xvals = [x*0.005 for x in range(len(trmse))]
+
+    if len(tts_align)>1:
+        realign = tts_align[1][1]
+        t_xvals = [x - realign for x in t_xvals]
+        tts_align = [(w,s-realign) for w,s in tts_align]
+
+    if len(tts_align)>2:
+        for i in range(2,len(tts_align)):
+            bound_line = tts_align[i][1]
+            plt.axvline(x=bound_line, color="black", linestyle='--', linewidth=1, label=f"TTS -> {tts_align[i][0]}")
+    plt.scatter(t_xvals, trmse, color="black", label=f"TTS {voice}")
+
+
+    #plt.legend()
+    #plt.show()
+
+
+    return fig
+
+
+def plot_rmse_cluster(speech_data,words,seg_aligns,cluster_id):
+    colors = ["red", "green", "blue", "orange", "purple", "pink", "brown", "gray", "cyan"]
+    cc = 0
+    fig = plt.figure(figsize=(10, 5))
+    plt.title(f"{words} - Energy - Cluster {cluster_id}")
+    for k,v in speech_data.items():
+
+        spk = k.split('**')[1]
+
+        word_times = seg_aligns[k]
+
+        rmse = [e for p,e in v]
+        # datapoint interval is 0.005 seconds
+        rmse_xvals = [x*0.005 for x in range(len(rmse))]
+
+        # centre around the first word boundary -
+        # if 3+ words, too bad.
+        if len(word_times)>1:
+            realign = np.mean([word_times[0][2],word_times[1][1]])
+            rmse_xvals = [x - realign for x in rmse_xvals]
+            word_times = [(w,s-realign,e-realign) for w,s,e in word_times]
+            plt.axvline(x=0, color="gray", linestyle='--', linewidth=1, label=f"{word_times[0][0]} -> {word_times[1][0]} boundary")
+
+        if len(word_times)>2:
+            for i in range(1,len(word_times)-1):
+                bound_line = np.mean([word_times[i][2],word_times[i+1][1]])
+                plt.axvline(x=bound_line, color=colors[cc], linestyle='--', linewidth=1, label=f"Speaker {spk} -> {word_times[i+1][0]}")
+
+        plt.scatter(rmse_xvals, rmse, color=colors[cc], label=f"Speaker {spk}")
+        cc += 1
+        if cc >= len(colors):
+            cc=0
+
+    return fig
+
 
 # want to:
 # - find tts best cluster
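The new plot_rmse_* functions reuse the realignment trick already used for pitch: every curve is sampled at 5 ms intervals, and the x-axis is shifted so that x = 0 falls on the first word boundary, taken as the midpoint between word 0's end time and word 1's start time, which lets curves from different recordings be overlaid at a common anchor. (For TTS, tts_align holds (word, start) pairs rather than (word, start, end) triples, so the shift uses the second word's start time directly.) A self-contained sketch of the human-speech version of that transform, assuming the same (word, start, end) alignment format as seg_aligns:

import numpy as np

FRAME_S = 0.005  # datapoint interval used throughout the diff: 5 ms

def realign_to_first_boundary(values, word_times):
    # word_times: list of (word, start_s, end_s) tuples, as in seg_aligns
    xvals = [i * FRAME_S for i in range(len(values))]
    if len(word_times) > 1:
        # boundary = midpoint of word 0's end and word 1's start
        boundary = np.mean([word_times[0][2], word_times[1][1]])
        xvals = [x - boundary for x in xvals]
        word_times = [(w, s - boundary, e - boundary) for w, s, e in word_times]
    return xvals, word_times

# two words aligned at 0.00-0.40 s and 0.45-0.90 s:
xs, wt = realign_to_first_boundary([0.1] * 100, [("a", 0.0, 0.40), ("b", 0.45, 0.90)])
# boundary = mean(0.40, 0.45) = 0.425, so xs now starts at -0.425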
 
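The new functions also pin down the shape of the per-recording data: each value in speech_data (and tts_data) is a sequence of (pitch, energy) pairs, since plot_rmse_* extracts the energy track with [e for p,e in v], and the keys follow a 'words**recording_id' convention that the plots split on '**' to recover the speaker. A small illustration of that shape, inferred from the comprehensions in the diff (the numbers are invented):

# assumed data shape; values are invented for illustration
speech_data = {
    "hello world**rec01": [(110.2, 0.031), (112.8, 0.034), (115.0, 0.040)],
}

frames = speech_data["hello world**rec01"]
pitch = [p for p, e in frames]   # presumably what plot_pitch_* draws
energy = [e for p, e in frames]  # what plot_rmse_* draws, one point per 5 ms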
scripts/runSQ.py CHANGED
@@ -42,11 +42,11 @@ def run(sentence, voices, start_end_word_ix):
     tts_sample, tts_speechmarks = get_tts(sentence,voices,tts_dir)
     f0_tts(sentence, voices, tts_dir)
 
-    score, fig = cl.cluster(norm_sentence, sentence, human_rec_ids, speech_aligns, speech_f0, speech_dir, tts_dir, voices, start_end_word_ix)
+    score, tts_fig_p, mid_fig_p, bad_fig_p, tts_fig_e, fig_mid_e, fig_bad_e = cl.cluster(norm_sentence, sentence, human_rec_ids, speech_aligns, speech_f0, speech_dir, tts_dir, voices, start_end_word_ix)
 
     # also stop forgetting duration.
 
-    return tts_sample, score, fig
+    return tts_sample, score, tts_fig_p, mid_fig_p, bad_fig_p, tts_fig_e, fig_mid_e, fig_bad_e
 
 
 def snorm(s):
@@ -253,7 +253,7 @@ def localtest():
     f0_tts(sentence, voices, tts_dir, reaper_path = reaper_exc)
 
 
-    score, tts_fig, mid_fig, bad_fig = cl.cluster(norm_sentence, sentence, human_rec_ids, speech_aligns, speech_f0, speech_dir, tts_dir, voices, start_end_word_ix)
+    score, tts_fig_p, mid_fig_p, bad_fig_p, tts_fig_e, fig_mid_e, fig_bad_e = cl.cluster(norm_sentence, sentence, human_rec_ids, speech_aligns, speech_f0, speech_dir, tts_dir, voices, start_end_word_ix)
 
 
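With this commit the seven-element result of cl.cluster() is unpacked positionally in several places (match_tts, cluster, run, localtest, and f1 in app.py), and the names already drift between call sites (mid_fig_p in runSQ.py versus fig_mid_p in clusterprosody.py). One way to make the contract harder to break, sketched here as a possible refactor rather than anything the repo does, is a NamedTuple:

from typing import NamedTuple
from matplotlib.figure import Figure

# hypothetical container for cluster()'s return value
class ClusterResult(NamedTuple):
    score: float
    tts_fig_p: Figure
    fig_mid_p: Figure
    fig_bad_p: Figure
    tts_fig_e: Figure
    fig_mid_e: Figure
    fig_bad_e: Figure

# a caller can then pick fields by name instead of by position, e.g.
# res = cl.cluster(...)
# return tts_sample, res.score, res.tts_fig_p, res.fig_mid_p, res.fig_bad_p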