matdmiller committed on
Commit
c59a6cf
1 Parent(s): 4f12daa

updated chunking and ui

Browse files
Files changed (2) hide show
  1. app.ipynb +195 -75
  2. app.py +76 -53
app.ipynb CHANGED
@@ -2,7 +2,7 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 30,
6
  "id": "3bedf0dc-8d8e-4ede-a9e6-b8f35136aa00",
7
  "metadata": {},
8
  "outputs": [],
@@ -42,7 +42,7 @@
42
  },
43
  {
44
  "cell_type": "code",
45
- "execution_count": 1,
46
  "id": "667802a7-0f36-4136-a381-e66210b20462",
47
  "metadata": {},
48
  "outputs": [
@@ -94,7 +94,7 @@
94
  },
95
  {
96
  "cell_type": "code",
97
- "execution_count": 2,
98
  "id": "7664bc24-e8a7-440d-851d-eb16dc2d69fb",
99
  "metadata": {},
100
  "outputs": [
@@ -128,7 +128,7 @@
128
  },
129
  {
130
  "cell_type": "code",
131
- "execution_count": 3,
132
  "id": "4d9863fc-969e-409b-8e20-b9c3cd2cc3e7",
133
  "metadata": {},
134
  "outputs": [],
@@ -142,7 +142,7 @@
142
  },
143
  {
144
  "cell_type": "code",
145
- "execution_count": 4,
146
  "id": "4f486d3a",
147
  "metadata": {},
148
  "outputs": [],
@@ -187,7 +187,7 @@
187
  },
188
  {
189
  "cell_type": "code",
190
- "execution_count": 5,
191
  "id": "ecb7f207-0fc2-4d19-a313-356c05776832",
192
  "metadata": {},
193
  "outputs": [
@@ -208,7 +208,24 @@
208
  },
209
  {
210
  "cell_type": "code",
211
- "execution_count": 6,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  "id": "e5d6cac2-0dee-42d8-9b41-184b5be9cc3f",
213
  "metadata": {},
214
  "outputs": [],
@@ -219,7 +236,7 @@
219
  },
220
  {
221
  "cell_type": "code",
222
- "execution_count": 7,
223
  "id": "b77ad8d6-3289-463c-b213-1c0cc215b141",
224
  "metadata": {},
225
  "outputs": [
@@ -239,6 +256,9 @@
239
  " 'name': 'Open AI',\n",
240
  " 'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},\n",
241
  " 'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},\n",
 
 
 
242
  " }\n",
243
  " print('Successfully added OpenAI as Provider')\n",
244
  "except Exception as e:\n",
@@ -248,7 +268,7 @@
248
  },
249
  {
250
  "cell_type": "code",
251
- "execution_count": 8,
252
  "id": "87fca48b-a16a-4d2b-919c-75e88e4e5eb5",
253
  "metadata": {},
254
  "outputs": [
@@ -268,6 +288,10 @@
268
  " 'name': 'Cartesia AI',\n",
269
  " 'models': {'upbeat-moon': 'Sonic Turbo English'},\n",
270
  " 'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},\n",
 
 
 
 
271
  " }\n",
272
  " print('Successfully added Cartesia AI as Provider')\n",
273
  "except Exception as e:\n",
@@ -316,30 +340,62 @@
316
  },
317
  {
318
  "cell_type": "code",
319
- "execution_count": 9,
320
- "id": "8eb7e7d5-7121-4762-b8d1-e5a9539e2b36",
321
  "metadata": {},
322
  "outputs": [],
323
  "source": [
324
- "#| export\n",
325
- "clean_text_prompt = \"\"\"Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early.\"\"\"\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
326
  ]
327
  },
328
  {
329
  "cell_type": "code",
330
- "execution_count": 10,
331
- "id": "52d373be-3a79-412e-8ca2-92bb443fa52d",
332
  "metadata": {},
333
  "outputs": [],
334
  "source": [
335
  "#| export\n",
336
- "#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.\n",
337
- "OPENAI_CLIENT_TTS_THREADS = 10 \n",
338
- "CARTESIAAI_CLIENT_TTS_THREADS = 3\n",
339
- "\n",
340
- "DEFAULT_PROVIDER = 'openai'\n",
341
- "DEFAULT_MODEL = 'tts-1'\n",
342
- "DEFAULT_VOICE = 'alloy'"
343
  ]
344
  },
345
  {
@@ -352,7 +408,7 @@
352
  },
353
  {
354
  "cell_type": "code",
355
- "execution_count": 11,
356
  "id": "b5b29507-92bc-453d-bcc5-6402c17e9a0d",
357
  "metadata": {},
358
  "outputs": [],
@@ -372,13 +428,17 @@
372
  },
373
  {
374
  "cell_type": "code",
375
- "execution_count": 12,
376
  "id": "24674094-4d47-4e48-b591-55faabcff8df",
377
  "metadata": {},
378
  "outputs": [],
379
  "source": [
380
  "#| export\n",
381
- "def split_text(input_text, max_length=4000, lookback=1000):\n",
 
 
 
 
382
  " # If the text is shorter than the max_length, return it as is\n",
383
  " if len(input_text) <= max_length:\n",
384
  " return [input_text]\n",
@@ -413,7 +473,7 @@
413
  },
414
  {
415
  "cell_type": "code",
416
- "execution_count": 13,
417
  "id": "e6224ae5-3792-42b2-8392-3abd42998a50",
418
  "metadata": {},
419
  "outputs": [],
@@ -452,10 +512,18 @@
452
  },
453
  {
454
  "cell_type": "code",
455
- "execution_count": 14,
456
  "id": "4691703d-ed0f-4481-8006-b2906289b780",
457
  "metadata": {},
458
- "outputs": [],
 
 
 
 
 
 
 
 
459
  "source": [
460
  "#| export\n",
461
  "def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):\n",
@@ -467,7 +535,10 @@
467
  " \n",
468
  " response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)\n",
469
  " client.close()\n",
470
- " return chunk_idx, response.content"
 
 
 
471
  ]
472
  },
473
  {
@@ -504,10 +575,18 @@
504
  },
505
  {
506
  "cell_type": "code",
507
- "execution_count": 15,
508
  "id": "3420c868-71cb-4ac6-ac65-6f02bfd841d1",
509
  "metadata": {},
510
- "outputs": [],
 
 
 
 
 
 
 
 
511
  "source": [
512
  "#| export\n",
513
  "def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon', \n",
@@ -527,54 +606,68 @@
527
  " output_format=output_format, \n",
528
  " **kwargs)\n",
529
  " client.close()\n",
530
- " return chunk_idx, response[\"audio\"]"
 
 
 
531
  ]
532
  },
533
  {
534
  "cell_type": "code",
535
- "execution_count": 16,
536
  "id": "d0082383-9d03-4b25-b68a-080d0b28caa9",
537
  "metadata": {},
538
  "outputs": [],
539
  "source": [
540
  "# test\n",
541
  "# create_speech_cartesiaai(1,\"Hi. What's your name?\", model='upbeat-moon',\n",
542
- "# voice='63ff761f-c1e8-414b-b969-d1833d1c870c')"
 
 
 
 
 
 
 
 
 
543
  ]
544
  },
545
  {
546
  "cell_type": "code",
547
- "execution_count": 17,
 
 
 
 
 
 
 
 
 
 
 
548
  "id": "e34bb4aa-698c-4452-8cda-bd02b38f7122",
549
  "metadata": {},
550
  "outputs": [],
551
  "source": [
552
  "#| export\n",
553
  "def create_speech(input_text, provider, model='tts-1', voice='alloy', \n",
554
- " profile: gr.OAuthProfile|None=None, \n",
555
  " progress=gr.Progress(), **kwargs):\n",
556
  "\n",
557
  " #Verify auth if it is required. This is very important if this is in a HF space. DO NOT DELETE!!!\n",
558
  " if REQUIRE_AUTH: verify_authorization(profile)\n",
559
  " start = datetime.now()\n",
560
  " \n",
561
- " if provider == 'cartesiaai':\n",
562
- " create_speech_func = create_speech_cartesiaai\n",
563
- " max_chunk_size = 500\n",
564
- " chunk_processing_time = 20\n",
565
- " threads = CARTESIAAI_CLIENT_TTS_THREADS\n",
566
- " audio_file_conversion_kwargs = {'format': 'raw', 'frame_rate': 44100, 'channels': 1, 'sample_width': 2}\n",
567
- " elif provider == 'openai':\n",
568
- " create_speech_func = create_speech_openai\n",
569
- " max_chunk_size = 4000\n",
570
- " chunk_processing_time = 60\n",
571
- " threads = OPENAI_CLIENT_TTS_THREADS\n",
572
- " audio_file_conversion_kwargs = {'format': 'mp3'}\n",
573
- " else:\n",
574
- " raise ValueError(f'Invalid argument provider: {provider}')\n",
575
  " \n",
576
  " # Split the input text into chunks\n",
577
- " chunks = split_text(input_text, max_length=max_chunk_size)\n",
578
  "\n",
579
  " # Initialize the progress bar\n",
580
  " progress(0, desc=f\"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.\")\n",
@@ -606,7 +699,7 @@
606
  },
607
  {
608
  "cell_type": "code",
609
- "execution_count": 19,
610
  "id": "ca2c6f8c-62ed-4ac1-9c2f-e3b2bfb47e8d",
611
  "metadata": {},
612
  "outputs": [],
@@ -618,7 +711,7 @@
618
  },
619
  {
620
  "cell_type": "code",
621
- "execution_count": 20,
622
  "id": "236dd8d3-4364-4731-af93-7dcdec6f18a1",
623
  "metadata": {},
624
  "outputs": [],
@@ -630,7 +723,7 @@
630
  },
631
  {
632
  "cell_type": "code",
633
- "execution_count": 21,
634
  "id": "0523a158-ee07-48b3-9350-ee39d4deee7f",
635
  "metadata": {},
636
  "outputs": [],
@@ -652,7 +745,7 @@
652
  },
653
  {
654
  "cell_type": "code",
655
- "execution_count": 22,
656
  "id": "f4d1ba0b-6960-4e22-8dba-7de70370753a",
657
  "metadata": {},
658
  "outputs": [],
@@ -664,7 +757,7 @@
664
  },
665
  {
666
  "cell_type": "code",
667
- "execution_count": 23,
668
  "id": "efa28cf2-548d-439f-bf2a-21a5edbf9eba",
669
  "metadata": {},
670
  "outputs": [],
@@ -677,7 +770,7 @@
677
  },
678
  {
679
  "cell_type": "code",
680
- "execution_count": 24,
681
  "id": "cdc1dde5-5edd-4dbf-bd11-30eb418c571d",
682
  "metadata": {},
683
  "outputs": [],
@@ -689,7 +782,7 @@
689
  },
690
  {
691
  "cell_type": "code",
692
- "execution_count": 25,
693
  "id": "035c33dd-c8e6-42b4-91d4-6bc5f1b36df3",
694
  "metadata": {},
695
  "outputs": [],
@@ -702,13 +795,27 @@
702
  },
703
  {
704
  "cell_type": "code",
705
- "execution_count": 26,
 
 
 
 
 
 
 
 
 
 
 
 
706
  "id": "e4fb3159-579b-4271-bc96-4cd1e2816eca",
707
  "metadata": {},
708
  "outputs": [],
709
  "source": [
710
  "#| export\n",
711
  "with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600,3600)) as app:\n",
 
 
712
  " gr.Markdown(\"# TTS\")\n",
713
  " gr.Markdown(\"\"\"Start typing below and then click **Go** to create the speech from your text.\n",
714
  "For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href=\"https://matdmiller-tts-openai.hf.space/\" target=\"_blank\">Spaces Link HTML</a>\"\"\")\n",
@@ -725,39 +832,52 @@
725
  " generation_cost = gr.Label(label=\"Generation cost\")\n",
726
  " with gr.Row():\n",
727
  " output_audio = gr.Audio()\n",
 
 
 
 
 
 
 
 
728
  "\n",
729
- " #input_text \n",
 
 
730
  " input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)\n",
731
  " input_text.input(fn=get_generation_cost, \n",
732
  " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], \n",
733
  " outputs=generation_cost)\n",
734
- "\n",
 
 
735
  " tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown], \n",
736
  " outputs=tts_model_dropdown)\n",
737
  " tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown], \n",
738
  " outputs=tts_voice_dropdown)\n",
 
739
  " \n",
 
740
  " tts_model_dropdown.change(fn=get_generation_cost, \n",
741
  " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], outputs=generation_cost)\n",
742
  " \n",
743
- " go_btn = gr.Button(\"Go\")\n",
744
  " go_btn.click(fn=create_speech, \n",
745
  " inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown], \n",
746
  " outputs=[output_audio])\n",
747
  " \n",
748
- " clear_btn = gr.Button('Clear')\n",
749
  " clear_btn.click(fn=lambda: '', outputs=input_text)\n",
750
  "\n",
751
  " if REQUIRE_AUTH:\n",
752
- " gr.LoginButton()\n",
753
- " m = gr.Markdown('')\n",
754
- " app.load(verify_authorization, None, m)\n",
755
  " "
756
  ]
757
  },
758
  {
759
  "cell_type": "code",
760
- "execution_count": 27,
761
  "id": "a00648a1-891b-470b-9959-f5d502055713",
762
  "metadata": {},
763
  "outputs": [],
@@ -771,7 +891,7 @@
771
  },
772
  {
773
  "cell_type": "code",
774
- "execution_count": 28,
775
  "id": "4b534fe7-4337-423e-846a-1bdb7cccc4ea",
776
  "metadata": {},
777
  "outputs": [
@@ -779,7 +899,7 @@
779
  "name": "stdout",
780
  "output_type": "stream",
781
  "text": [
782
- "Running on local URL: http://127.0.0.1:7861\n",
783
  "\n",
784
  "To create a public link, set `share=True` in `launch()`.\n"
785
  ]
@@ -787,7 +907,7 @@
787
  {
788
  "data": {
789
  "text/html": [
790
- "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
791
  ],
792
  "text/plain": [
793
  "<IPython.core.display.HTML object>"
@@ -800,7 +920,7 @@
800
  "data": {
801
  "text/plain": []
802
  },
803
- "execution_count": 28,
804
  "metadata": {},
805
  "output_type": "execute_result"
806
  }
@@ -828,7 +948,7 @@
828
  },
829
  {
830
  "cell_type": "code",
831
- "execution_count": 29,
832
  "id": "28e8d888-e790-46fa-bbac-4511b9ab796c",
833
  "metadata": {},
834
  "outputs": [
@@ -836,7 +956,7 @@
836
  "name": "stdout",
837
  "output_type": "stream",
838
  "text": [
839
- "Closing server running on port: 7861\n"
840
  ]
841
  }
842
  ],
@@ -847,7 +967,7 @@
847
  },
848
  {
849
  "cell_type": "code",
850
- "execution_count": 30,
851
  "id": "afbc9699-4d16-4060-88f4-cd1251754cbd",
852
  "metadata": {},
853
  "outputs": [],
@@ -858,7 +978,7 @@
858
  },
859
  {
860
  "cell_type": "code",
861
- "execution_count": 33,
862
  "id": "0420310d-930b-4904-8bd4-3458ad8bdbd3",
863
  "metadata": {},
864
  "outputs": [],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 2,
6
  "id": "3bedf0dc-8d8e-4ede-a9e6-b8f35136aa00",
7
  "metadata": {},
8
  "outputs": [],
 
42
  },
43
  {
44
  "cell_type": "code",
45
+ "execution_count": 3,
46
  "id": "667802a7-0f36-4136-a381-e66210b20462",
47
  "metadata": {},
48
  "outputs": [
 
94
  },
95
  {
96
  "cell_type": "code",
97
+ "execution_count": 4,
98
  "id": "7664bc24-e8a7-440d-851d-eb16dc2d69fb",
99
  "metadata": {},
100
  "outputs": [
 
128
  },
129
  {
130
  "cell_type": "code",
131
+ "execution_count": 5,
132
  "id": "4d9863fc-969e-409b-8e20-b9c3cd2cc3e7",
133
  "metadata": {},
134
  "outputs": [],
 
142
  },
143
  {
144
  "cell_type": "code",
145
+ "execution_count": 6,
146
  "id": "4f486d3a",
147
  "metadata": {},
148
  "outputs": [],
 
187
  },
188
  {
189
  "cell_type": "code",
190
+ "execution_count": 7,
191
  "id": "ecb7f207-0fc2-4d19-a313-356c05776832",
192
  "metadata": {},
193
  "outputs": [
 
208
  },
209
  {
210
  "cell_type": "code",
211
+ "execution_count": 8,
212
+ "id": "52d373be-3a79-412e-8ca2-92bb443fa52d",
213
+ "metadata": {},
214
+ "outputs": [],
215
+ "source": [
216
+ "#| export\n",
217
+ "#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.\n",
218
+ "OPENAI_CLIENT_TTS_THREADS = 10 \n",
219
+ "CARTESIAAI_CLIENT_TTS_THREADS = 3\n",
220
+ "\n",
221
+ "DEFAULT_PROVIDER = 'openai'\n",
222
+ "DEFAULT_MODEL = 'tts-1'\n",
223
+ "DEFAULT_VOICE = 'alloy'"
224
+ ]
225
+ },
226
+ {
227
+ "cell_type": "code",
228
+ "execution_count": 9,
229
  "id": "e5d6cac2-0dee-42d8-9b41-184b5be9cc3f",
230
  "metadata": {},
231
  "outputs": [],
 
236
  },
237
  {
238
  "cell_type": "code",
239
+ "execution_count": 10,
240
  "id": "b77ad8d6-3289-463c-b213-1c0cc215b141",
241
  "metadata": {},
242
  "outputs": [
 
256
  " 'name': 'Open AI',\n",
257
  " 'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},\n",
258
  " 'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},\n",
259
+ " 'settings': {'max_chunk_size': 4000, 'chunk_processing_time': 60, \n",
260
+ " 'threads': OPENAI_CLIENT_TTS_THREADS,\n",
261
+ " 'audio_file_conversion_kwargs':{'format': 'mp3'}},\n",
262
  " }\n",
263
  " print('Successfully added OpenAI as Provider')\n",
264
  "except Exception as e:\n",
 
268
  },
269
  {
270
  "cell_type": "code",
271
+ "execution_count": 11,
272
  "id": "87fca48b-a16a-4d2b-919c-75e88e4e5eb5",
273
  "metadata": {},
274
  "outputs": [
 
288
  " 'name': 'Cartesia AI',\n",
289
  " 'models': {'upbeat-moon': 'Sonic Turbo English'},\n",
290
  " 'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},\n",
291
+ " 'settings': {'max_chunk_size': 500, 'chunk_processing_time': 20, \n",
292
+ " 'threads': CARTESIAAI_CLIENT_TTS_THREADS,\n",
293
+ " 'audio_file_conversion_kwargs':{'format': 'raw', 'frame_rate': 44100, \n",
294
+ " 'channels': 1, 'sample_width': 2}},\n",
295
  " }\n",
296
  " print('Successfully added Cartesia AI as Provider')\n",
297
  "except Exception as e:\n",
 
340
  },
341
  {
342
  "cell_type": "code",
343
+ "execution_count": 12,
344
+ "id": "d1352f28-f761-4e91-a9bc-4efe47552f4d",
345
  "metadata": {},
346
  "outputs": [],
347
  "source": [
348
+ "# {v['id']:v['name'] for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},"
349
+ ]
350
+ },
351
+ {
352
+ "cell_type": "markdown",
353
+ "id": "a06e16b3-6310-462e-8192-b65ca324d86f",
354
+ "metadata": {},
355
+ "source": [
356
+ "```\n",
357
+ "({'3b554273-4299-48b9-9aaf-eefd438e3941': 'Indian Lady',\n",
358
+ " '63ff761f-c1e8-414b-b969-d1833d1c870c': 'Confident British Man',\n",
359
+ " 'daf747c6-6bc2-4083-bd59-aa94dce23f5d': 'Middle Eastern Woman',\n",
360
+ " 'ed81fd13-2016-4a49-8fe3-c0d2761695fc': 'Sportsman',\n",
361
+ " 'f114a467-c40a-4db8-964d-aaba89cd08fa': 'Yogaman',\n",
362
+ " 'c45bc5ec-dc68-4feb-8829-6e6b2748095d': 'Movieman',\n",
363
+ " '87748186-23bb-4158-a1eb-332911b0b708': 'Wizardman',\n",
364
+ " '98a34ef2-2140-4c28-9c71-663dc4dd7022': 'Southern Man',\n",
365
+ " '79f8b5fb-2cc8-479a-80df-29f7a7cf1a3e': 'Nonfiction Man',\n",
366
+ " '36b42fcb-60c5-4bec-b077-cb1a00a92ec6': 'Pilot over Intercom',\n",
367
+ " '69267136-1bdc-412f-ad78-0caad210fb40': 'Friendly Reading Man',\n",
368
+ " '15a9cd88-84b0-4a8b-95f2-5d583b54c72e': 'Reading Lady',\n",
369
+ " '95856005-0332-41b0-935f-352e296aa0df': 'Classy British Man',\n",
370
+ " 'd46abd1d-2d02-43e8-819f-51fb652c1c61': 'Newsman',\n",
371
+ " '2ee87190-8f84-4925-97da-e52547f9462c': 'Child',\n",
372
+ " 'c2ac25f9-ecc4-4f56-9095-651354df60c0': 'Commercial Lady',\n",
373
+ " '5345cf08-6f37-424d-a5d9-8ae1101b9377': 'Maria',\n",
374
+ " 'a3520a8f-226a-428d-9fcd-b0a4711a6829': 'Reflective Woman',\n",
375
+ " 'e3827ec5-697a-4b7c-9704-1a23041bbc51': 'Sweet Lady',\n",
376
+ " 'a0e99841-438c-4a64-b679-ae501e7d6091': 'Barbershop Man',\n",
377
+ " 'cd17ff2d-5ea4-4695-be8f-42193949b946': 'Meditation Lady',\n",
378
+ " 'bf991597-6c13-47e4-8411-91ec2de5c466': 'Newslady',\n",
379
+ " '41534e16-2966-4c6b-9670-111411def906': \"1920's Radioman\",\n",
380
+ " '79a125e8-cd45-4c13-8a67-188112f4dd22': 'British Lady',\n",
381
+ " 'a167e0f3-df7e-4d52-a9c3-f949145efdab': 'Ted',\n",
382
+ " '248be419-c632-4f23-adf1-5324ed7dbf1d': 'Hannah',\n",
383
+ " 'c8605446-247c-4d39-acd4-8f4c28aa363c': 'Wise Lady',\n",
384
+ " '00a77add-48d5-4ef6-8157-71e5437b282d': 'Calm Lady',\n",
385
+ " '638efaaa-4d0c-442e-b701-3fae16aad012': 'Indian Man',\n",
386
+ " 'd6b6d712-6030-4420-9771-8b35f326fd20': 'Mat 1'},)\n",
387
+ "```"
388
  ]
389
  },
390
  {
391
  "cell_type": "code",
392
+ "execution_count": 13,
393
+ "id": "8eb7e7d5-7121-4762-b8d1-e5a9539e2b36",
394
  "metadata": {},
395
  "outputs": [],
396
  "source": [
397
  "#| export\n",
398
+ "clean_text_prompt = \"\"\"Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early.\"\"\"\n"
 
 
 
 
 
 
399
  ]
400
  },
401
  {
 
408
  },
409
  {
410
  "cell_type": "code",
411
+ "execution_count": 14,
412
  "id": "b5b29507-92bc-453d-bcc5-6402c17e9a0d",
413
  "metadata": {},
414
  "outputs": [],
 
428
  },
429
  {
430
  "cell_type": "code",
431
+ "execution_count": 15,
432
  "id": "24674094-4d47-4e48-b591-55faabcff8df",
433
  "metadata": {},
434
  "outputs": [],
435
  "source": [
436
  "#| export\n",
437
+ "def split_text(input_text, provider):\n",
438
+ " settings = providers[provider]['settings']\n",
439
+ " max_length = settings['max_chunk_size']\n",
440
+ " lookback = max_length // 4\n",
441
+ " \n",
442
  " # If the text is shorter than the max_length, return it as is\n",
443
  " if len(input_text) <= max_length:\n",
444
  " return [input_text]\n",
 
473
  },
474
  {
475
  "cell_type": "code",
476
+ "execution_count": 16,
477
  "id": "e6224ae5-3792-42b2-8392-3abd42998a50",
478
  "metadata": {},
479
  "outputs": [],
 
512
  },
513
  {
514
  "cell_type": "code",
515
+ "execution_count": 17,
516
  "id": "4691703d-ed0f-4481-8006-b2906289b780",
517
  "metadata": {},
518
+ "outputs": [
519
+ {
520
+ "name": "stdout",
521
+ "output_type": "stream",
522
+ "text": [
523
+ "Added create_speech_func for openai provider\n"
524
+ ]
525
+ }
526
+ ],
527
  "source": [
528
  "#| export\n",
529
  "def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):\n",
 
535
  " \n",
536
  " response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)\n",
537
  " client.close()\n",
538
+ " return chunk_idx, response.content\n",
539
+ "if 'openai' in providers.keys():\n",
540
+ " providers['openai']['settings']['create_speech_func'] = create_speech_openai\n",
541
+ " print('Added create_speech_func for openai provider')"
542
  ]
543
  },
544
  {
 
575
  },
576
  {
577
  "cell_type": "code",
578
+ "execution_count": 18,
579
  "id": "3420c868-71cb-4ac6-ac65-6f02bfd841d1",
580
  "metadata": {},
581
+ "outputs": [
582
+ {
583
+ "name": "stdout",
584
+ "output_type": "stream",
585
+ "text": [
586
+ "Added create_speech_func for create_speech_cartesiaai provider\n"
587
+ ]
588
+ }
589
+ ],
590
  "source": [
591
  "#| export\n",
592
  "def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon', \n",
 
606
  " output_format=output_format, \n",
607
  " **kwargs)\n",
608
  " client.close()\n",
609
+ " return chunk_idx, response[\"audio\"]\n",
610
+ "if 'cartesiaai' in providers.keys():\n",
611
+ " providers['cartesiaai']['settings']['create_speech_func'] = create_speech_cartesiaai\n",
612
+ " print('Added create_speech_func for create_speech_cartesiaai provider')"
613
  ]
614
  },
615
  {
616
  "cell_type": "code",
617
+ "execution_count": 19,
618
  "id": "d0082383-9d03-4b25-b68a-080d0b28caa9",
619
  "metadata": {},
620
  "outputs": [],
621
  "source": [
622
  "# test\n",
623
  "# create_speech_cartesiaai(1,\"Hi. What's your name?\", model='upbeat-moon',\n",
624
+ "# voice='63ff761f-c1e8-414b-b969-d1833d1c870c')\n",
625
+ "# text = \"\"\"Ladies and Gentlemen, gather 'round your radio sets for a special report on a remarkable phenomenon sweeping the nation! It seems a new generation of prodigies is lighting up our schools and captivating communities from coast to coast. Yes, we're talking about gifted children, those bright young minds whose talents and intellects are far beyond their tender years.\n",
626
+ "# In classrooms across the country, educators are reporting an unprecedented wave of exceptional youngsters. These gifted children, some as young as five or six, are astonishing their teachers with feats of arithmetic, literacy, and creativity that rival those of much older students. The age of enlightenment is upon us, and it starts with these brilliant boys and girls!\n",
627
+ "# Take young Tommy Caldwell of Newark, New Jersey, for instance. At the ripe age of seven, Tommy has already mastered algebra and is now diving into the wonders of geometry. His teacher, Miss Grace Whittaker, says she’s never seen such a gifted mathematician in her twenty years of teaching. And Tommy is just one of many!\n",
628
+ "# Then there's little Sally Henderson of Topeka, Kansas. At the tender age of eight, Sally has penned her first book of poetry, a collection that has local literary circles abuzz with excitement. Her vivid imagination and eloquent verse have earned her the nickname “The Young Bard of Topeka.” The town is swelling with pride for their young poetess.\n",
629
+ "# Experts from esteemed institutions like Harvard and Yale are paying close attention to this trend, conducting studies to understand the roots of such exceptional abilities. Some suggest it’s the result of improved nutrition and health care, while others believe it’s the progressive educational methods now being employed in our schools. Regardless of the cause, one thing is clear: America is home to a new generation of geniuses!\n",
630
+ "# Communities are rallying to support these young prodigies, with special programs and schools being established to nurture their extraordinary talents. Parents, teachers, and civic leaders are united in their mission to provide the resources and opportunities these gifted children need to reach their full potential.\n",
631
+ "# So, dear listeners, as you sit by your radios tonight, take heart in the knowledge that our future is bright. With these remarkable young minds leading the way, the possibilities are endless. This is your announcer signing off, wishing you all a splendid evening and a hopeful tomorrow.\"\"\"\n",
632
+ "# _, test_data = create_speech_cartesiaai(1, text, model='upbeat-moon',\n",
633
+ "# voice='41534e16-2966-4c6b-9670-111411def906')"
634
  ]
635
  },
636
  {
637
  "cell_type": "code",
638
+ "execution_count": 20,
639
+ "id": "649d90a5-9398-4cb5-a1e8-a464d463a11c",
640
+ "metadata": {},
641
+ "outputs": [],
642
+ "source": [
643
+ "# test_file = AudioSegment.from_file(io.BytesIO(test_data), **{'format': 'raw', 'frame_rate': 44100, 'channels': 1, 'sample_width': 2})\n",
644
+ "# test_file.export('./test3.mp3', format=\"mp3\")"
645
+ ]
646
+ },
647
+ {
648
+ "cell_type": "code",
649
+ "execution_count": 21,
650
  "id": "e34bb4aa-698c-4452-8cda-bd02b38f7122",
651
  "metadata": {},
652
  "outputs": [],
653
  "source": [
654
  "#| export\n",
655
  "def create_speech(input_text, provider, model='tts-1', voice='alloy', \n",
656
+ " profile: gr.OAuthProfile|None=None, # comment out of running locally\n",
657
  " progress=gr.Progress(), **kwargs):\n",
658
  "\n",
659
  " #Verify auth if it is required. This is very important if this is in a HF space. DO NOT DELETE!!!\n",
660
  " if REQUIRE_AUTH: verify_authorization(profile)\n",
661
  " start = datetime.now()\n",
662
  " \n",
663
+ " settings = providers[provider]['settings']\n",
664
+ " create_speech_func = settings['create_speech_func']\n",
665
+ " chunk_processing_time = settings['chunk_processing_time']\n",
666
+ " threads = settings['threads']\n",
667
+ " audio_file_conversion_kwargs = settings['audio_file_conversion_kwargs']\n",
 
 
 
 
 
 
 
 
 
668
  " \n",
669
  " # Split the input text into chunks\n",
670
+ " chunks = split_text(input_text, provider=provider)\n",
671
  "\n",
672
  " # Initialize the progress bar\n",
673
  " progress(0, desc=f\"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.\")\n",
 
699
  },
700
  {
701
  "cell_type": "code",
702
+ "execution_count": 22,
703
  "id": "ca2c6f8c-62ed-4ac1-9c2f-e3b2bfb47e8d",
704
  "metadata": {},
705
  "outputs": [],
 
711
  },
712
  {
713
  "cell_type": "code",
714
+ "execution_count": 23,
715
  "id": "236dd8d3-4364-4731-af93-7dcdec6f18a1",
716
  "metadata": {},
717
  "outputs": [],
 
723
  },
724
  {
725
  "cell_type": "code",
726
+ "execution_count": 24,
727
  "id": "0523a158-ee07-48b3-9350-ee39d4deee7f",
728
  "metadata": {},
729
  "outputs": [],
 
745
  },
746
  {
747
  "cell_type": "code",
748
+ "execution_count": 25,
749
  "id": "f4d1ba0b-6960-4e22-8dba-7de70370753a",
750
  "metadata": {},
751
  "outputs": [],
 
757
  },
758
  {
759
  "cell_type": "code",
760
+ "execution_count": 26,
761
  "id": "efa28cf2-548d-439f-bf2a-21a5edbf9eba",
762
  "metadata": {},
763
  "outputs": [],
 
770
  },
771
  {
772
  "cell_type": "code",
773
+ "execution_count": 27,
774
  "id": "cdc1dde5-5edd-4dbf-bd11-30eb418c571d",
775
  "metadata": {},
776
  "outputs": [],
 
782
  },
783
  {
784
  "cell_type": "code",
785
+ "execution_count": 28,
786
  "id": "035c33dd-c8e6-42b4-91d4-6bc5f1b36df3",
787
  "metadata": {},
788
  "outputs": [],
 
795
  },
796
  {
797
  "cell_type": "code",
798
+ "execution_count": 29,
799
+ "id": "c97c03af-a377-42e1-93e0-1df957c0e4cc",
800
+ "metadata": {},
801
+ "outputs": [],
802
+ "source": [
803
+ "def split_text_as_md(*args, **kwargs):\n",
804
+ " output = split_text(*args, **kwargs)\n",
805
+ " return '# Text Splits:\\n' + '<br>----------<br>'.join(output)"
806
+ ]
807
+ },
808
+ {
809
+ "cell_type": "code",
810
+ "execution_count": 30,
811
  "id": "e4fb3159-579b-4271-bc96-4cd1e2816eca",
812
  "metadata": {},
813
  "outputs": [],
814
  "source": [
815
  "#| export\n",
816
  "with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600,3600)) as app:\n",
817
+ " \n",
818
+ " ### Define UI ###\n",
819
  " gr.Markdown(\"# TTS\")\n",
820
  " gr.Markdown(\"\"\"Start typing below and then click **Go** to create the speech from your text.\n",
821
  "For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href=\"https://matdmiller-tts-openai.hf.space/\" target=\"_blank\">Spaces Link HTML</a>\"\"\")\n",
 
832
  " generation_cost = gr.Label(label=\"Generation cost\")\n",
833
  " with gr.Row():\n",
834
  " output_audio = gr.Audio()\n",
835
+ " go_btn = gr.Button(\"Go\")\n",
836
+ " clear_btn = gr.Button('Clear')\n",
837
+ " if REQUIRE_AUTH:\n",
838
+ " gr.LoginButton()\n",
839
+ " auth_md = gr.Markdown('')\n",
840
+ " \n",
841
+ " chunks_md = gr.Markdown('',label='Chunks')\n",
842
+ " \n",
843
  "\n",
844
+ " ### Define UI Actions ###\n",
845
+ " \n",
846
+ " # input_text \n",
847
  " input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)\n",
848
  " input_text.input(fn=get_generation_cost, \n",
849
  " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], \n",
850
  " outputs=generation_cost)\n",
851
+ " input_text.input(fn=split_text_as_md, inputs=[input_text,tts_provider_dropdown], outputs=chunks_md)\n",
852
+ " \n",
853
+ " # tts_provider_dropdown\n",
854
  " tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown], \n",
855
  " outputs=tts_model_dropdown)\n",
856
  " tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown], \n",
857
  " outputs=tts_voice_dropdown)\n",
858
+ " tts_provider_dropdown.change(fn=split_text_as_md, inputs=[input_text,tts_provider_dropdown], outputs=chunks_md)\n",
859
  " \n",
860
+ " # tts_model_dropdown\n",
861
  " tts_model_dropdown.change(fn=get_generation_cost, \n",
862
  " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], outputs=generation_cost)\n",
863
  " \n",
864
+ " \n",
865
  " go_btn.click(fn=create_speech, \n",
866
  " inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown], \n",
867
  " outputs=[output_audio])\n",
868
  " \n",
869
+ " \n",
870
  " clear_btn.click(fn=lambda: '', outputs=input_text)\n",
871
  "\n",
872
  " if REQUIRE_AUTH:\n",
873
+ " app.load(verify_authorization, None, auth_md)\n",
874
+ " \n",
 
875
  " "
876
  ]
877
  },
878
  {
879
  "cell_type": "code",
880
+ "execution_count": 31,
881
  "id": "a00648a1-891b-470b-9959-f5d502055713",
882
  "metadata": {},
883
  "outputs": [],
 
891
  },
892
  {
893
  "cell_type": "code",
894
+ "execution_count": 32,
895
  "id": "4b534fe7-4337-423e-846a-1bdb7cccc4ea",
896
  "metadata": {},
897
  "outputs": [
 
899
  "name": "stdout",
900
  "output_type": "stream",
901
  "text": [
902
+ "Running on local URL: http://127.0.0.1:7860\n",
903
  "\n",
904
  "To create a public link, set `share=True` in `launch()`.\n"
905
  ]
 
907
  {
908
  "data": {
909
  "text/html": [
910
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
911
  ],
912
  "text/plain": [
913
  "<IPython.core.display.HTML object>"
 
920
  "data": {
921
  "text/plain": []
922
  },
923
+ "execution_count": 32,
924
  "metadata": {},
925
  "output_type": "execute_result"
926
  }
 
948
  },
949
  {
950
  "cell_type": "code",
951
+ "execution_count": 33,
952
  "id": "28e8d888-e790-46fa-bbac-4511b9ab796c",
953
  "metadata": {},
954
  "outputs": [
 
956
  "name": "stdout",
957
  "output_type": "stream",
958
  "text": [
959
+ "Closing server running on port: 7860\n"
960
  ]
961
  }
962
  ],
 
967
  },
968
  {
969
  "cell_type": "code",
970
+ "execution_count": null,
971
  "id": "afbc9699-4d16-4060-88f4-cd1251754cbd",
972
  "metadata": {},
973
  "outputs": [],
 
978
  },
979
  {
980
  "cell_type": "code",
981
+ "execution_count": null,
982
  "id": "0420310d-930b-4904-8bd4-3458ad8bdbd3",
983
  "metadata": {},
984
  "outputs": [],
app.py CHANGED
@@ -1,8 +1,8 @@
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
- __all__ = ['secret_import_failed', 'TEMP', 'TEMP_DIR', 'providers', 'clean_text_prompt', 'OPENAI_CLIENT_TTS_THREADS',
5
- 'CARTESIAAI_CLIENT_TTS_THREADS', 'DEFAULT_PROVIDER', 'DEFAULT_MODEL', 'DEFAULT_VOICE', 'launch_kwargs',
6
  'queue_kwargs', 'verify_authorization', 'split_text', 'concatenate_audio', 'create_speech_openai',
7
  'create_speech_cartesiaai', 'create_speech', 'get_input_text_len', 'get_generation_cost',
8
  'get_model_choices', 'update_model_choices', 'get_voice_choices', 'update_voice_choices']
@@ -76,48 +76,55 @@ TEMP_DIR = Path(TEMP)
76
  print('TEMP Dir:', TEMP_DIR)
77
 
78
  # %% app.ipynb 12
79
- providers = dict()
 
 
 
 
 
 
80
 
81
  # %% app.ipynb 13
 
 
 
82
  # Add OpenAI as a provider
83
  try:
84
  providers['openai'] = {
85
  'name': 'Open AI',
86
  'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},
87
  'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},
 
 
 
88
  }
89
  print('Successfully added OpenAI as Provider')
90
  except Exception as e:
91
  print(f"""Error: Failed to add OpenAI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
92
  # providers
93
 
94
- # %% app.ipynb 14
95
  # Add Cartesia AI as a provider
96
  try:
97
  providers['cartesiaai'] = {
98
  'name': 'Cartesia AI',
99
  'models': {'upbeat-moon': 'Sonic Turbo English'},
100
  'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},
 
 
 
 
101
  }
102
  print('Successfully added Cartesia AI as Provider')
103
  except Exception as e:
104
  print(f"""Error: Failed to add Cartesia AI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
105
  # providers
106
 
107
- # %% app.ipynb 16
108
  clean_text_prompt = """Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early."""
109
 
110
 
111
- # %% app.ipynb 17
112
- #Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.
113
- OPENAI_CLIENT_TTS_THREADS = 10
114
- CARTESIAAI_CLIENT_TTS_THREADS = 3
115
-
116
- DEFAULT_PROVIDER = 'openai'
117
- DEFAULT_MODEL = 'tts-1'
118
- DEFAULT_VOICE = 'alloy'
119
-
120
- # %% app.ipynb 19
121
  def verify_authorization(profile: gr.OAuthProfile=None) -> str:
122
  print('Profile:', profile)
123
  if REQUIRE_AUTH == False:
@@ -129,8 +136,12 @@ def verify_authorization(profile: gr.OAuthProfile=None) -> str:
129
  raise PermissionError(f'Your huggingface username ({profile}) is not authorized. Must be set in ALLOWED_OAUTH_PROFILE_USERNAMES environment variable.')
130
  return None
131
 
132
- # %% app.ipynb 20
133
- def split_text(input_text, max_length=4000, lookback=1000):
 
 
 
 
134
  # If the text is shorter than the max_length, return it as is
135
  if len(input_text) <= max_length:
136
  return [input_text]
@@ -162,7 +173,7 @@ def split_text(input_text, max_length=4000, lookback=1000):
162
 
163
  return chunks
164
 
165
- # %% app.ipynb 21
166
  def concatenate_audio(files:list, **kwargs):
167
 
168
  # Initialize an empty AudioSegment object for concatenation
@@ -193,7 +204,7 @@ def concatenate_audio(files:list, **kwargs):
193
  print('Saving mp3 file to temp directory: ', filepath)
194
  return str(filepath)
195
 
196
- # %% app.ipynb 22
197
  def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):
198
  client = openai.OpenAI()
199
 
@@ -204,8 +215,11 @@ def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1
204
  response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)
205
  client.close()
206
  return chunk_idx, response.content
 
 
 
207
 
208
- # %% app.ipynb 24
209
  def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon',
210
  voice='248be419-c632-4f23-adf1-5324ed7dbf1d', #Hannah
211
  websocket=False,
@@ -224,33 +238,27 @@ def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon',
224
  **kwargs)
225
  client.close()
226
  return chunk_idx, response["audio"]
 
 
 
227
 
228
- # %% app.ipynb 26
229
  def create_speech(input_text, provider, model='tts-1', voice='alloy',
230
- profile: gr.OAuthProfile|None=None,
231
  progress=gr.Progress(), **kwargs):
232
 
233
  #Verify auth if it is required. This is very important if this is in a HF space. DO NOT DELETE!!!
234
  if REQUIRE_AUTH: verify_authorization(profile)
235
  start = datetime.now()
236
 
237
- if provider == 'cartesiaai':
238
- create_speech_func = create_speech_cartesiaai
239
- max_chunk_size = 500
240
- chunk_processing_time = 20
241
- threads = CARTESIAAI_CLIENT_TTS_THREADS
242
- audio_file_conversion_kwargs = {'format': 'raw', 'frame_rate': 44100, 'channels': 1, 'sample_width': 2}
243
- elif provider == 'openai':
244
- create_speech_func = create_speech_openai
245
- max_chunk_size = 4000
246
- chunk_processing_time = 60
247
- threads = OPENAI_CLIENT_TTS_THREADS
248
- audio_file_conversion_kwargs = {'format': 'mp3'}
249
- else:
250
- raise ValueError(f'Invalid argument provider: {provider}')
251
 
252
  # Split the input text into chunks
253
- chunks = split_text(input_text, max_length=max_chunk_size)
254
 
255
  # Initialize the progress bar
256
  progress(0, desc=f"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.")
@@ -280,11 +288,11 @@ def create_speech(input_text, provider, model='tts-1', voice='alloy',
280
  return combined_audio
281
 
282
 
283
- # %% app.ipynb 28
284
  def get_input_text_len(input_text):
285
  return len(input_text)
286
 
287
- # %% app.ipynb 29
288
  def get_generation_cost(input_text, tts_model_dropdown, provider):
289
  text_len = len(input_text)
290
  if provider == 'openai':
@@ -298,26 +306,28 @@ def get_generation_cost(input_text, tts_model_dropdown, provider):
298
  raise ValueError(f'Invalid argument provider: {provider}')
299
  return "${:,.3f}".format(cost)
300
 
301
- # %% app.ipynb 30
302
  def get_model_choices(provider):
303
  return sorted([(v,k) for k,v in providers[provider]['models'].items()])
304
 
305
- # %% app.ipynb 31
306
  def update_model_choices(provider):
307
  choices = get_model_choices(provider)
308
  return gr.update(choices=choices,value=choices[0][1])
309
 
310
- # %% app.ipynb 32
311
  def get_voice_choices(provider, model):
312
  return sorted([(v['name'],v['id']) for v in providers[provider]['voices'].values()])
313
 
314
- # %% app.ipynb 33
315
  def update_voice_choices(provider, model):
316
  choices = get_voice_choices(provider, model)
317
  return gr.update(choices=choices,value=choices[0][1])
318
 
319
- # %% app.ipynb 34
320
  with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600,3600)) as app:
 
 
321
  gr.Markdown("# TTS")
322
  gr.Markdown("""Start typing below and then click **Go** to create the speech from your text.
323
  For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href="https://matdmiller-tts-openai.hf.space/" target="_blank">Spaces Link HTML</a>""")
@@ -334,42 +344,55 @@ For requests longer than allowed by the API they will be broken into chunks auto
334
  generation_cost = gr.Label(label="Generation cost")
335
  with gr.Row():
336
  output_audio = gr.Audio()
 
 
 
 
 
 
 
 
337
 
338
- #input_text
 
 
339
  input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)
340
  input_text.input(fn=get_generation_cost,
341
  inputs=[input_text,tts_model_dropdown,tts_provider_dropdown],
342
  outputs=generation_cost)
343
-
 
 
344
  tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown],
345
  outputs=tts_model_dropdown)
346
  tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown],
347
  outputs=tts_voice_dropdown)
 
348
 
 
349
  tts_model_dropdown.change(fn=get_generation_cost,
350
  inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], outputs=generation_cost)
351
 
352
- go_btn = gr.Button("Go")
353
  go_btn.click(fn=create_speech,
354
  inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown],
355
  outputs=[output_audio])
356
 
357
- clear_btn = gr.Button('Clear')
358
  clear_btn.click(fn=lambda: '', outputs=input_text)
359
 
360
  if REQUIRE_AUTH:
361
- gr.LoginButton()
362
- m = gr.Markdown('')
363
- app.load(verify_authorization, None, m)
364
 
365
 
366
- # %% app.ipynb 35
367
  # launch_kwargs = {'auth':('username',GRADIO_PASSWORD),
368
  # 'auth_message':'Please log in to Mat\'s TTS App with username: username and password.'}
369
  launch_kwargs = {}
370
  queue_kwargs = {'default_concurrency_limit':10}
371
 
372
- # %% app.ipynb 37
373
  #.py launch
374
  if __name__ == "__main__":
375
  app.queue(**queue_kwargs)
 
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
+ __all__ = ['secret_import_failed', 'TEMP', 'TEMP_DIR', 'OPENAI_CLIENT_TTS_THREADS', 'CARTESIAAI_CLIENT_TTS_THREADS',
5
+ 'DEFAULT_PROVIDER', 'DEFAULT_MODEL', 'DEFAULT_VOICE', 'providers', 'clean_text_prompt', 'launch_kwargs',
6
  'queue_kwargs', 'verify_authorization', 'split_text', 'concatenate_audio', 'create_speech_openai',
7
  'create_speech_cartesiaai', 'create_speech', 'get_input_text_len', 'get_generation_cost',
8
  'get_model_choices', 'update_model_choices', 'get_voice_choices', 'update_voice_choices']
 
76
  print('TEMP Dir:', TEMP_DIR)
77
 
78
  # %% app.ipynb 12
79
+ #Number of threads created PER USER REQUEST. This throttles the # of API requests PER USER request. This is in ADDITION to the Gradio threads.
80
+ OPENAI_CLIENT_TTS_THREADS = 10
81
+ CARTESIAAI_CLIENT_TTS_THREADS = 3
82
+
83
+ DEFAULT_PROVIDER = 'openai'
84
+ DEFAULT_MODEL = 'tts-1'
85
+ DEFAULT_VOICE = 'alloy'
86
 
87
  # %% app.ipynb 13
88
+ providers = dict()
89
+
90
+ # %% app.ipynb 14
91
  # Add OpenAI as a provider
92
  try:
93
  providers['openai'] = {
94
  'name': 'Open AI',
95
  'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},
96
  'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},
97
+ 'settings': {'max_chunk_size': 4000, 'chunk_processing_time': 60,
98
+ 'threads': OPENAI_CLIENT_TTS_THREADS,
99
+ 'audio_file_conversion_kwargs':{'format': 'mp3'}},
100
  }
101
  print('Successfully added OpenAI as Provider')
102
  except Exception as e:
103
  print(f"""Error: Failed to add OpenAI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
104
  # providers
105
 
106
+ # %% app.ipynb 15
107
  # Add Cartesia AI as a provider
108
  try:
109
  providers['cartesiaai'] = {
110
  'name': 'Cartesia AI',
111
  'models': {'upbeat-moon': 'Sonic Turbo English'},
112
  'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},
113
+ 'settings': {'max_chunk_size': 500, 'chunk_processing_time': 20,
114
+ 'threads': CARTESIAAI_CLIENT_TTS_THREADS,
115
+ 'audio_file_conversion_kwargs':{'format': 'raw', 'frame_rate': 44100,
116
+ 'channels': 1, 'sample_width': 2}},
117
  }
118
  print('Successfully added Cartesia AI as Provider')
119
  except Exception as e:
120
  print(f"""Error: Failed to add Cartesia AI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
121
  # providers
122
 
123
+ # %% app.ipynb 19
124
  clean_text_prompt = """Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early."""
125
 
126
 
127
+ # %% app.ipynb 21
 
 
 
 
 
 
 
 
 
128
  def verify_authorization(profile: gr.OAuthProfile=None) -> str:
129
  print('Profile:', profile)
130
  if REQUIRE_AUTH == False:
 
136
  raise PermissionError(f'Your huggingface username ({profile}) is not authorized. Must be set in ALLOWED_OAUTH_PROFILE_USERNAMES environment variable.')
137
  return None
138
 
139
+ # %% app.ipynb 22
140
+ def split_text(input_text, provider):
141
+ settings = providers[provider]['settings']
142
+ max_length = settings['max_chunk_size']
143
+ lookback = max_length // 4
144
+
145
  # If the text is shorter than the max_length, return it as is
146
  if len(input_text) <= max_length:
147
  return [input_text]
 
173
 
174
  return chunks
175
 
176
+ # %% app.ipynb 23
177
  def concatenate_audio(files:list, **kwargs):
178
 
179
  # Initialize an empty AudioSegment object for concatenation
 
204
  print('Saving mp3 file to temp directory: ', filepath)
205
  return str(filepath)
206
 
207
+ # %% app.ipynb 24
208
  def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):
209
  client = openai.OpenAI()
210
 
 
215
  response = _create_speech_with_backoff(input=input, model=model, voice=voice, speed=speed, **kwargs)
216
  client.close()
217
  return chunk_idx, response.content
218
+ if 'openai' in providers.keys():
219
+ providers['openai']['settings']['create_speech_func'] = create_speech_openai
220
+ print('Added create_speech_func for openai provider')
221
 
222
+ # %% app.ipynb 26
223
  def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon',
224
  voice='248be419-c632-4f23-adf1-5324ed7dbf1d', #Hannah
225
  websocket=False,
 
238
  **kwargs)
239
  client.close()
240
  return chunk_idx, response["audio"]
241
+ if 'cartesiaai' in providers.keys():
242
+ providers['cartesiaai']['settings']['create_speech_func'] = create_speech_cartesiaai
243
+ print('Added create_speech_func for create_speech_cartesiaai provider')
244
 
245
+ # %% app.ipynb 29
246
  def create_speech(input_text, provider, model='tts-1', voice='alloy',
247
+ profile: gr.OAuthProfile|None=None, # comment out if running locally
248
  progress=gr.Progress(), **kwargs):
249
 
250
  #Verify auth if it is required. This is very important if this is in a HF space. DO NOT DELETE!!!
251
  if REQUIRE_AUTH: verify_authorization(profile)
252
  start = datetime.now()
253
 
254
+ settings = providers[provider]['settings']
255
+ create_speech_func = settings['create_speech_func']
256
+ chunk_processing_time = settings['chunk_processing_time']
257
+ threads = settings['threads']
258
+ audio_file_conversion_kwargs = settings['audio_file_conversion_kwargs']
 
 
 
 
 
 
 
 
 
259
 
260
  # Split the input text into chunks
261
+ chunks = split_text(input_text, provider=provider)
262
 
263
  # Initialize the progress bar
264
  progress(0, desc=f"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.")
 
288
  return combined_audio
289
 
290
 
291
+ # %% app.ipynb 31
292
  def get_input_text_len(input_text):
293
  return len(input_text)
294
 
295
+ # %% app.ipynb 32
296
  def get_generation_cost(input_text, tts_model_dropdown, provider):
297
  text_len = len(input_text)
298
  if provider == 'openai':
 
306
  raise ValueError(f'Invalid argument provider: {provider}')
307
  return "${:,.3f}".format(cost)
308
 
309
+ # %% app.ipynb 33
310
  def get_model_choices(provider):
311
  return sorted([(v,k) for k,v in providers[provider]['models'].items()])
312
 
313
+ # %% app.ipynb 34
314
  def update_model_choices(provider):
315
  choices = get_model_choices(provider)
316
  return gr.update(choices=choices,value=choices[0][1])
317
 
318
+ # %% app.ipynb 35
319
  def get_voice_choices(provider, model):
320
  return sorted([(v['name'],v['id']) for v in providers[provider]['voices'].values()])
321
 
322
+ # %% app.ipynb 36
323
  def update_voice_choices(provider, model):
324
  choices = get_voice_choices(provider, model)
325
  return gr.update(choices=choices,value=choices[0][1])
326
 
327
+ # %% app.ipynb 38
328
  with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600,3600)) as app:
329
+
330
+ ### Define UI ###
331
  gr.Markdown("# TTS")
332
  gr.Markdown("""Start typing below and then click **Go** to create the speech from your text.
333
  For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href="https://matdmiller-tts-openai.hf.space/" target="_blank">Spaces Link HTML</a>""")
 
344
  generation_cost = gr.Label(label="Generation cost")
345
  with gr.Row():
346
  output_audio = gr.Audio()
347
+ go_btn = gr.Button("Go")
348
+ clear_btn = gr.Button('Clear')
349
+ if REQUIRE_AUTH:
350
+ gr.LoginButton()
351
+ auth_md = gr.Markdown('')
352
+
353
+ chunks_md = gr.Markdown('',label='Chunks')
354
+
355
 
356
+ ### Define UI Actions ###
357
+
358
+ # input_text
359
  input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)
360
  input_text.input(fn=get_generation_cost,
361
  inputs=[input_text,tts_model_dropdown,tts_provider_dropdown],
362
  outputs=generation_cost)
363
+ input_text.input(fn=split_text_as_md, inputs=[input_text,tts_provider_dropdown], outputs=chunks_md)
364
+
365
+ # tts_provider_dropdown
366
  tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown],
367
  outputs=tts_model_dropdown)
368
  tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown],
369
  outputs=tts_voice_dropdown)
370
+ tts_provider_dropdown.change(fn=split_text_as_md, inputs=[input_text,tts_provider_dropdown], outputs=chunks_md)
371
 
372
+ # tts_model_dropdown
373
  tts_model_dropdown.change(fn=get_generation_cost,
374
  inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], outputs=generation_cost)
375
 
376
+
377
  go_btn.click(fn=create_speech,
378
  inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown],
379
  outputs=[output_audio])
380
 
381
+
382
  clear_btn.click(fn=lambda: '', outputs=input_text)
383
 
384
  if REQUIRE_AUTH:
385
+ app.load(verify_authorization, None, auth_md)
386
+
 
387
 
388
 
389
+ # %% app.ipynb 39
390
  # launch_kwargs = {'auth':('username',GRADIO_PASSWORD),
391
  # 'auth_message':'Please log in to Mat\'s TTS App with username: username and password.'}
392
  launch_kwargs = {}
393
  queue_kwargs = {'default_concurrency_limit':10}
394
 
395
+ # %% app.ipynb 41
396
  #.py launch
397
  if __name__ == "__main__":
398
  app.queue(**queue_kwargs)