matdmiller commited on
Commit
666e796
1 Parent(s): 17d79ff

add support for cartesia

Browse files
Files changed (3) hide show
  1. app.ipynb +493 -124
  2. app.py +186 -112
  3. requirements.txt +4 -3
app.ipynb CHANGED
@@ -2,7 +2,7 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": null,
6
  "id": "3bedf0dc-8d8e-4ede-a9e6-b8f35136aa00",
7
  "metadata": {},
8
  "outputs": [],
@@ -10,41 +10,123 @@
10
  "#|default_exp app"
11
  ]
12
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  {
14
  "cell_type": "code",
15
- "execution_count": null,
16
  "id": "667802a7-0f36-4136-a381-e66210b20462",
17
  "metadata": {},
18
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
19
  "source": [
20
  "#| export\n",
21
- "#tts_openai_secrets.py content:\n",
22
- "#import os\n",
23
- "#os.environ['OPENAI_API_KEY'] = 'sk-XXXXXXXXXXXXXXXXXXXXXX'\n",
24
  "import os\n",
25
  "secret_import_failed = False\n",
26
  "try:\n",
 
27
  " _ = os.environ['OPENAI_API_KEY']\n",
28
  " print('OPENAI_API_KEY environment variable was found.')\n",
29
  "except:\n",
30
  " print('OPENAI_API_KEY environment variable was not found.')\n",
31
  " secret_import_failed = True\n",
32
  "try:\n",
33
- " GRADIO_PASSWORD = os.environ['GRADIO_PASSWORD']\n",
34
- " print('GRADIO_PASSWORD environment variable was found.')\n",
 
 
 
 
 
 
35
  "except:\n",
36
- " print('GRADIO_PASSWORD environment variable was not found.')\n",
37
  " secret_import_failed = True\n",
38
  "\n",
39
  "if secret_import_failed == True:\n",
40
  " import tts_openai_secrets\n",
41
- " GRADIO_PASSWORD = os.environ['GRADIO_PASSWORD']\n",
 
 
42
  " print('import tts_openai_secrets succeeded')"
43
  ]
44
  },
45
  {
46
  "cell_type": "code",
47
- "execution_count": null,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  "id": "4d9863fc-969e-409b-8e20-b9c3cd2cc3e7",
49
  "metadata": {},
50
  "outputs": [],
@@ -58,12 +140,13 @@
58
  },
59
  {
60
  "cell_type": "code",
61
- "execution_count": null,
62
  "id": "4f486d3a",
63
  "metadata": {},
64
  "outputs": [],
65
  "source": [
66
  "#| export\n",
 
67
  "import gradio as gr\n",
68
  "import openai\n",
69
  "from pydub import AudioSegment\n",
@@ -78,15 +161,42 @@
78
  " retry,\n",
79
  " stop_after_attempt,\n",
80
  " wait_random_exponential,\n",
81
- ") # for exponential backoff"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  ]
83
  },
84
  {
85
  "cell_type": "code",
86
- "execution_count": null,
87
  "id": "ecb7f207-0fc2-4d19-a313-356c05776832",
88
  "metadata": {},
89
- "outputs": [],
 
 
 
 
 
 
 
 
90
  "source": [
91
  "#| export\n",
92
  "TEMP = os.environ.get('GRADIO_TEMP_DIR','/tmp/')\n",
@@ -96,33 +206,115 @@
96
  },
97
  {
98
  "cell_type": "code",
99
- "execution_count": null,
100
- "id": "0ffd33b4-cb9b-4c01-bff6-4c3102854ab6",
101
  "metadata": {},
102
  "outputs": [],
103
  "source": [
104
  "#| export\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  "try:\n",
106
- " tts_models = [o.id for o in openai.models.list().data if 'tts' in o.id]\n",
107
- " print('successfully got tts model list:', tts_models)\n",
108
- "except:\n",
109
- " tts_models = ['tts-1']"
 
 
 
 
 
110
  ]
111
  },
112
  {
113
  "cell_type": "code",
114
- "execution_count": null,
115
- "id": "2ddbca5d-4b04-43ab-afaf-430802980e78",
116
  "metadata": {},
117
- "outputs": [],
 
 
 
 
 
 
 
 
118
  "source": [
119
  "#| export\n",
120
- "tts_voices = ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  ]
122
  },
123
  {
124
  "cell_type": "code",
125
- "execution_count": null,
126
  "id": "8eb7e7d5-7121-4762-b8d1-e5a9539e2b36",
127
  "metadata": {},
128
  "outputs": [],
@@ -133,19 +325,52 @@
133
  },
134
  {
135
  "cell_type": "code",
136
- "execution_count": null,
137
  "id": "52d373be-3a79-412e-8ca2-92bb443fa52d",
138
  "metadata": {},
139
  "outputs": [],
140
  "source": [
141
  "#| export\n",
142
  "#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.\n",
143
- "OPENAI_CLIENT_TTS_THREADS = 10 "
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  ]
145
  },
146
  {
147
  "cell_type": "code",
148
- "execution_count": null,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  "id": "24674094-4d47-4e48-b591-55faabcff8df",
150
  "metadata": {},
151
  "outputs": [],
@@ -186,16 +411,14 @@
186
  },
187
  {
188
  "cell_type": "code",
189
- "execution_count": null,
190
  "id": "e6224ae5-3792-42b2-8392-3abd42998a50",
191
  "metadata": {},
192
  "outputs": [],
193
  "source": [
194
  "#| export\n",
195
- "def concatenate_mp3(mp3_files):\n",
196
- " # if len(mp3_files) == 1:\n",
197
- " # return mp3_files[0]\n",
198
- " # else:\n",
199
  " # Initialize an empty AudioSegment object for concatenation\n",
200
  " combined = AudioSegment.empty()\n",
201
  " \n",
@@ -231,7 +454,7 @@
231
  },
232
  {
233
  "cell_type": "code",
234
- "execution_count": null,
235
  "id": "4691703d-ed0f-4481-8006-b2906289b780",
236
  "metadata": {},
237
  "outputs": [],
@@ -249,31 +472,101 @@
249
  " return chunk_idx, response.content"
250
  ]
251
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
252
  {
253
  "cell_type": "code",
254
- "execution_count": null,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
255
  "id": "e34bb4aa-698c-4452-8cda-bd02b38f7122",
256
  "metadata": {},
257
  "outputs": [],
258
  "source": [
259
  "#| export\n",
260
- "def create_speech2(input_text, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress(), **kwargs):\n",
261
- " print('cs2-profile:',profile)\n",
262
- " assert authorized(profile) is not None,'Unauthorized M'\n",
263
  " start = datetime.now()\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
264
  " # Split the input text into chunks\n",
265
- " chunks = split_text(input_text)\n",
266
  "\n",
267
  " # Initialize the progress bar\n",
268
- " progress(0, desc=f\"Started processing {len(chunks)} text chunks using {OPENAI_CLIENT_TTS_THREADS} threads. ETA is ~{ceil(len(chunks)/OPENAI_CLIENT_TTS_THREADS)} min.\")\n",
269
  "\n",
270
  " # Initialize a list to hold the audio data of each chunk\n",
271
  " audio_data = []\n",
272
  "\n",
273
  " # Process each chunk\n",
274
- " with ThreadPool(processes=OPENAI_CLIENT_TTS_THREADS) as pool:\n",
275
  " results = pool.starmap(\n",
276
- " partial(create_speech_openai, model=model, voice=voice, **kwargs), \n",
277
  " zip(range(len(chunks)),chunks)\n",
278
  " )\n",
279
  " audio_data = [o[1] for o in sorted(results)]\n",
@@ -294,58 +587,17 @@
294
  },
295
  {
296
  "cell_type": "code",
297
- "execution_count": null,
298
- "id": "5388e860",
299
  "metadata": {},
300
  "outputs": [],
301
  "source": [
302
- "#| export\n",
303
- "def create_speech(input_text, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress()):\n",
304
- " assert authorized(profile) is not None,'Unauthorized M'\n",
305
- " # Split the input text into chunks\n",
306
- " chunks = split_text(input_text)\n",
307
- "\n",
308
- " # Initialize the progress bar\n",
309
- " progress(0, desc=\"Starting TTS processing...\")\n",
310
- "\n",
311
- " # Initialize a list to hold the audio data of each chunk\n",
312
- " audio_data = []\n",
313
- "\n",
314
- " # Create a client instance for OpenAI\n",
315
- " client = openai.OpenAI()\n",
316
- "\n",
317
- " # Calculate the progress increment for each chunk\n",
318
- " progress_increment = 1.0 / len(chunks)\n",
319
- "\n",
320
- " # Process each chunk\n",
321
- " for i, chunk in enumerate(chunks):\n",
322
- " response = client.audio.speech.create(\n",
323
- " model=model,\n",
324
- " voice=voice,\n",
325
- " input=chunk,\n",
326
- " speed=1.0\n",
327
- " )\n",
328
- " # Append the audio content of the response to the list\n",
329
- " audio_data.append(response.content)\n",
330
- "\n",
331
- " # Update the progress bar\n",
332
- " progress((i + 1) * progress_increment, desc=f\"Processing chunk {i + 1} of {len(chunks)}\")\n",
333
- "\n",
334
- " # Close the client connection\n",
335
- " client.close()\n",
336
- "\n",
337
- " # Concatenate the audio data from all chunks\n",
338
- " combined_audio = concatenate_mp3(audio_data)\n",
339
- "\n",
340
- " # Final update to the progress bar\n",
341
- " progress(1, desc=\"Processing completed\")\n",
342
- "\n",
343
- " return combined_audio\n"
344
  ]
345
  },
346
  {
347
  "cell_type": "code",
348
- "execution_count": null,
349
  "id": "236dd8d3-4364-4731-af93-7dcdec6f18a1",
350
  "metadata": {},
351
  "outputs": [],
@@ -357,75 +609,142 @@
357
  },
358
  {
359
  "cell_type": "code",
360
- "execution_count": null,
361
  "id": "0523a158-ee07-48b3-9350-ee39d4deee7f",
362
  "metadata": {},
363
  "outputs": [],
364
  "source": [
365
  "#| export\n",
366
- "def get_generation_cost(input_text, tts_model_dropdown):\n",
367
  " text_len = len(input_text)\n",
368
- " if tts_model_dropdown.endswith('-hd'):\n",
369
- " cost = text_len/1000 * 0.03\n",
 
 
 
 
 
370
  " else:\n",
371
- " cost = text_len/1000 * 0.015\n",
372
  " return \"${:,.3f}\".format(cost)"
373
  ]
374
  },
375
  {
376
  "cell_type": "code",
377
- "execution_count": null,
378
- "id": "b5b29507-92bc-453d-bcc5-6402c17e9a0d",
379
  "metadata": {},
380
  "outputs": [],
381
  "source": [
382
  "#| export\n",
383
- "def authorized(profile: gr.OAuthProfile=None) -> str:\n",
384
- " print('Profile:', profile)\n",
385
- " if profile is not None and profile.username in [\"matdmiller\"]:\n",
386
- " return f\"{profile.username}\"\n",
387
- " else:\n",
388
- " print('Unauthorized',profile)\n",
389
- " return None"
390
  ]
391
  },
392
  {
393
  "cell_type": "code",
394
- "execution_count": null,
395
- "id": "e4fb3159-579b-4271-bc96-4cd1e2816eca",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
396
  "metadata": {},
397
  "outputs": [],
398
  "source": [
399
  "#| export\n",
400
- "with gr.Blocks(title='OpenAI TTS', head='OpenAI TTS', delete_cache=(3600,3600)) as app:\n",
401
- " gr.Markdown(\"# OpenAI TTS\")\n",
402
- " gr.Markdown(\"\"\"Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters. \n",
403
- "For requests longer than 4,000 chars they will be broken into chunks of 4,000 or less chars automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/)\"\"\")\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
404
  " with gr.Row():\n",
405
  " input_text = gr.Textbox(max_lines=100, label=\"Enter text here\")\n",
406
  " with gr.Row():\n",
407
- " tts_model_dropdown = gr.Dropdown(value='tts-1',choices=tts_models, label='Model')\n",
408
- " tts_voice_dropdown = gr.Dropdown(value='alloy',choices=tts_voices,label='Voice')\n",
 
409
  " input_text_length = gr.Label(label=\"Number of characters\")\n",
410
  " generation_cost = gr.Label(label=\"Generation cost\")\n",
 
411
  " output_audio = gr.Audio()\n",
 
 
412
  " input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)\n",
413
- " input_text.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)\n",
414
- " tts_model_dropdown.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)\n",
 
 
 
 
 
 
 
 
 
 
415
  " go_btn = gr.Button(\"Go\")\n",
416
- " go_btn.click(fn=create_speech2, inputs=[input_text, tts_model_dropdown, tts_voice_dropdown], outputs=[output_audio])\n",
 
 
 
417
  " clear_btn = gr.Button('Clear')\n",
418
  " clear_btn.click(fn=lambda: '', outputs=input_text)\n",
419
  "\n",
420
- " gr.LoginButton()\n",
421
- " m = gr.Markdown('')\n",
422
- " app.load(authorized, None, m)\n",
 
423
  " "
424
  ]
425
  },
426
  {
427
  "cell_type": "code",
428
- "execution_count": null,
429
  "id": "a00648a1-891b-470b-9959-f5d502055713",
430
  "metadata": {},
431
  "outputs": [],
@@ -439,10 +758,40 @@
439
  },
440
  {
441
  "cell_type": "code",
442
- "execution_count": null,
443
  "id": "4b534fe7-4337-423e-846a-1bdb7cccc4ea",
444
  "metadata": {},
445
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
446
  "source": [
447
  "#| hide\n",
448
  "#Notebook launch\n",
@@ -466,10 +815,18 @@
466
  },
467
  {
468
  "cell_type": "code",
469
- "execution_count": null,
470
  "id": "28e8d888-e790-46fa-bbac-4511b9ab796c",
471
  "metadata": {},
472
- "outputs": [],
 
 
 
 
 
 
 
 
473
  "source": [
474
  "#| hide\n",
475
  "app.close()"
@@ -477,10 +834,22 @@
477
  },
478
  {
479
  "cell_type": "code",
480
- "execution_count": null,
481
  "id": "afbc9699-4d16-4060-88f4-cd1251754cbd",
482
  "metadata": {},
483
- "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
484
  "source": [
485
  "#| hide\n",
486
  "gr.close_all()"
@@ -488,7 +857,7 @@
488
  },
489
  {
490
  "cell_type": "code",
491
- "execution_count": 58,
492
  "id": "0420310d-930b-4904-8bd4-3458ad8bdbd3",
493
  "metadata": {},
494
  "outputs": [],
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "id": "3bedf0dc-8d8e-4ede-a9e6-b8f35136aa00",
7
  "metadata": {},
8
  "outputs": [],
 
10
  "#|default_exp app"
11
  ]
12
  },
13
+ {
14
+ "cell_type": "markdown",
15
+ "id": "c2496690-28b2-4a79-89d5-f971b4d6f3d4",
16
+ "metadata": {},
17
+ "source": [
18
+ "# Initialization"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "markdown",
23
+ "id": "736caa6e-d79f-46e6-bf42-6594c8b809d4",
24
+ "metadata": {},
25
+ "source": [
26
+ "## Get/Set Environment Variables"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "markdown",
31
+ "id": "7baadc12-4748-4938-916f-0a256546c181",
32
+ "metadata": {},
33
+ "source": [
34
+ "If you want to run this locally without having to set up the environment variables in your system, you can create a file called `tts_openai_secrets.py` in the root directory with this content:\n",
35
+ "```python\n",
36
+ "import os\n",
37
+ "os.environ['OPENAI_API_KEY'] = 'sk-XXXXXXXXXXXXXXXXXXXXXX'\n",
38
+ "os.environ['CARTESIA_API_KEY'] = 'XXXXXXXXXXXXXXXXXXXXXX'\n",
39
+ "os.environ[\"ALLOWED_OAUTH_PROFILE_USERNAMES\"]= '<huggingface-username1>,<huggingface-username2>'\n",
40
+ "```"
41
+ ]
42
+ },
43
  {
44
  "cell_type": "code",
45
+ "execution_count": 2,
46
  "id": "667802a7-0f36-4136-a381-e66210b20462",
47
  "metadata": {},
48
+ "outputs": [
49
+ {
50
+ "name": "stdout",
51
+ "output_type": "stream",
52
+ "text": [
53
+ "OPENAI_API_KEY environment variable was not found.\n",
54
+ "CARTESIA_API_KEY environment variable was not found.\n",
55
+ "ALLOWED_OAUTH_PROFILE_USERNAMES environment variable was not found.\n",
56
+ "import tts_openai_secrets succeeded\n"
57
+ ]
58
+ }
59
+ ],
60
  "source": [
61
  "#| export\n",
62
+ "\n",
 
 
63
  "import os\n",
64
  "secret_import_failed = False\n",
65
  "try:\n",
66
+ " # don't need the openai api key in a variable\n",
67
  " _ = os.environ['OPENAI_API_KEY']\n",
68
  " print('OPENAI_API_KEY environment variable was found.')\n",
69
  "except:\n",
70
  " print('OPENAI_API_KEY environment variable was not found.')\n",
71
  " secret_import_failed = True\n",
72
  "try:\n",
73
+ " CARTESIA_API_KEY = os.environ['CARTESIA_API_KEY']\n",
74
+ " print('CARTESIA_API_KEY environment variable was found.')\n",
75
+ "except:\n",
76
+ " print('CARTESIA_API_KEY environment variable was not found.')\n",
77
+ " secret_import_failed = True\n",
78
+ "try:\n",
79
+ " ALLOWED_OAUTH_PROFILE_USERNAMES = os.environ['ALLOWED_OAUTH_PROFILE_USERNAMES']\n",
80
+ " print('ALLOWED_OAUTH_PROFILE_USERNAMES environment variable was found.')\n",
81
  "except:\n",
82
+ " print('ALLOWED_OAUTH_PROFILE_USERNAMES environment variable was not found.')\n",
83
  " secret_import_failed = True\n",
84
  "\n",
85
  "if secret_import_failed == True:\n",
86
  " import tts_openai_secrets\n",
87
+ " _ = os.environ['OPENAI_API_KEY']\n",
88
+ " CARTESIA_API_KEY = os.environ['CARTESIA_API_KEY']\n",
89
+ " ALLOWED_OAUTH_PROFILE_USERNAMES = os.environ['ALLOWED_OAUTH_PROFILE_USERNAMES']\n",
90
  " print('import tts_openai_secrets succeeded')"
91
  ]
92
  },
93
  {
94
  "cell_type": "code",
95
+ "execution_count": 3,
96
+ "id": "7664bc24-e8a7-440d-851d-eb16dc2d69fb",
97
+ "metadata": {},
98
+ "outputs": [
99
+ {
100
+ "name": "stdout",
101
+ "output_type": "stream",
102
+ "text": [
103
+ "REQUIRE_AUTH: False\n"
104
+ ]
105
+ }
106
+ ],
107
+ "source": [
108
+ "#| export\n",
109
+ "# If REQUIRE_AUTH environemnt variable is set to 'false' (from secrets) and HF_SPACE != 1 then we\n",
110
+ "# are running locally and don't require authentication and authorization, otherwise we do.\n",
111
+ "# We are using paid API's so don't want anybody/everybody to be able to use our paid services.\n",
112
+ "if os.environ.get(\"REQUIRE_AUTH\",'true') == 'false' and os.environ.get('HF_SPACE',0) != 1:\n",
113
+ " REQUIRE_AUTH = False\n",
114
+ "else:\n",
115
+ " REQUIRE_AUTH = True\n",
116
+ "print('REQUIRE_AUTH:',REQUIRE_AUTH)"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "markdown",
121
+ "id": "8c978095-da2a-43f8-9729-3d845e7056f1",
122
+ "metadata": {},
123
+ "source": [
124
+ "## Imports"
125
+ ]
126
+ },
127
+ {
128
+ "cell_type": "code",
129
+ "execution_count": 4,
130
  "id": "4d9863fc-969e-409b-8e20-b9c3cd2cc3e7",
131
  "metadata": {},
132
  "outputs": [],
 
140
  },
141
  {
142
  "cell_type": "code",
143
+ "execution_count": 5,
144
  "id": "4f486d3a",
145
  "metadata": {},
146
  "outputs": [],
147
  "source": [
148
  "#| export\n",
149
+ "import os\n",
150
  "import gradio as gr\n",
151
  "import openai\n",
152
  "from pydub import AudioSegment\n",
 
161
  " retry,\n",
162
  " stop_after_attempt,\n",
163
  " wait_random_exponential,\n",
164
+ ") # for exponential backoff\n",
165
+ "import traceback\n",
166
+ "# from cartesia.tts import CartesiaTTS\n",
167
+ "import cartesia"
168
+ ]
169
+ },
170
+ {
171
+ "cell_type": "markdown",
172
+ "id": "6b425ab4-cecd-4760-84fb-b7f2cc44a565",
173
+ "metadata": {},
174
+ "source": [
175
+ "Set the Gradio TEMP directory. This will be used to save audio files that were generated prior to returning them. The reason we are doing this is because if you return a bytesio object to a Gradio audio object it will not contain the file extension and will not be playable in Safari. If you pass the file to the Gradio audio object it will contain the extension. In addition if you pass the filepath instead of bytesio path, when you download the audio it will have the correct file extenion whereas otherwise it will not."
176
+ ]
177
+ },
178
+ {
179
+ "cell_type": "markdown",
180
+ "id": "852a3a1f-462a-41ab-bc94-b5ba12279ae9",
181
+ "metadata": {},
182
+ "source": [
183
+ "## App Settings/Constants"
184
  ]
185
  },
186
  {
187
  "cell_type": "code",
188
+ "execution_count": 6,
189
  "id": "ecb7f207-0fc2-4d19-a313-356c05776832",
190
  "metadata": {},
191
+ "outputs": [
192
+ {
193
+ "name": "stdout",
194
+ "output_type": "stream",
195
+ "text": [
196
+ "TEMP Dir: /tmp\n"
197
+ ]
198
+ }
199
+ ],
200
  "source": [
201
  "#| export\n",
202
  "TEMP = os.environ.get('GRADIO_TEMP_DIR','/tmp/')\n",
 
206
  },
207
  {
208
  "cell_type": "code",
209
+ "execution_count": 7,
210
+ "id": "e5d6cac2-0dee-42d8-9b41-184b5be9cc3f",
211
  "metadata": {},
212
  "outputs": [],
213
  "source": [
214
  "#| export\n",
215
+ "providers = dict()"
216
+ ]
217
+ },
218
+ {
219
+ "cell_type": "code",
220
+ "execution_count": 8,
221
+ "id": "b77ad8d6-3289-463c-b213-1c0cc215b141",
222
+ "metadata": {},
223
+ "outputs": [
224
+ {
225
+ "name": "stdout",
226
+ "output_type": "stream",
227
+ "text": [
228
+ "Successfully added OpenAI as Provider\n"
229
+ ]
230
+ }
231
+ ],
232
+ "source": [
233
+ "#| export\n",
234
+ "# Add OpenAI as a provider\n",
235
  "try:\n",
236
+ " providers['openai'] = {\n",
237
+ " 'name': 'Open AI',\n",
238
+ " 'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},\n",
239
+ " 'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},\n",
240
+ " }\n",
241
+ " print('Successfully added OpenAI as Provider')\n",
242
+ "except Exception as e:\n",
243
+ " print(f\"\"\"Error: Failed to add OpenAI as a provider.\\nException: {repr(e)}\\nTRACEBACK:\\n\"\"\",traceback.format_exc())\n",
244
+ "# providers"
245
  ]
246
  },
247
  {
248
  "cell_type": "code",
249
+ "execution_count": 9,
250
+ "id": "87fca48b-a16a-4d2b-919c-75e88e4e5eb5",
251
  "metadata": {},
252
+ "outputs": [
253
+ {
254
+ "name": "stdout",
255
+ "output_type": "stream",
256
+ "text": [
257
+ "Successfully added Cartesia AI as Provider\n"
258
+ ]
259
+ }
260
+ ],
261
  "source": [
262
  "#| export\n",
263
+ "# Add Cartesia AI as a provider\n",
264
+ "try:\n",
265
+ " providers['cartesiaai'] = {\n",
266
+ " 'name': 'Cartesia AI',\n",
267
+ " 'models': {'upbeat-moon': 'Sonic Turbo English'},\n",
268
+ " 'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},\n",
269
+ " }\n",
270
+ " print('Successfully added Cartesia AI as Provider')\n",
271
+ "except Exception as e:\n",
272
+ " print(f\"\"\"Error: Failed to add Cartesia AI as a provider.\\nException: {repr(e)}\\nTRACEBACK:\\n\"\"\",traceback.format_exc())\n",
273
+ "# providers"
274
+ ]
275
+ },
276
+ {
277
+ "cell_type": "markdown",
278
+ "id": "6bd2e9ed-9dbd-4d5f-a814-2942108b5935",
279
+ "metadata": {},
280
+ "source": [
281
+ "EXAMPLE: providers\n",
282
+ "```python\n",
283
+ "{'openai': {'name': 'Open AI',\n",
284
+ " 'models': {'tts-1-hd-1106': 'tts-1-hd-1106',\n",
285
+ " 'tts-1-hd': 'tts-1-hd',\n",
286
+ " 'tts-1': 'tts-1',\n",
287
+ " 'tts-1-1106': 'tts-1-1106'},\n",
288
+ " 'voices': {'alloy': {'id': 'alloy', 'name': 'Alloy'},\n",
289
+ " 'echo': {'id': 'echo', 'name': 'Echo'},\n",
290
+ " 'fable': {'id': 'fable', 'name': 'Fable'},\n",
291
+ " 'onyx': {'id': 'onyx', 'name': 'Onyx'},\n",
292
+ " 'nova': {'id': 'nova', 'name': 'Nova'},\n",
293
+ " 'shimmer': {'id': 'shimmer', 'name': 'Shimmer'}}},\n",
294
+ " 'cartesiaai': {'name': 'Cartesia AI',\n",
295
+ " 'models': {'upbeat-moon': 'Sonic Turbo English'},\n",
296
+ " 'voices': {'3b554273-4299-48b9-9aaf-eefd438e3941': {'id': '3b554273-4299-48b9-9aaf-eefd438e3941',\n",
297
+ " 'user_id': None,\n",
298
+ " 'is_public': True,\n",
299
+ " 'name': 'Indian Lady',\n",
300
+ " 'description': 'This voice is young, rich, and curious, perfect for a narrator or fictional character',\n",
301
+ " 'created_at': '2024-05-04T18:48:17.006441-07:00',\n",
302
+ " 'embedding': [0.015546328,-0.11384969,0.14146514, ...]},\n",
303
+ " '63ff761f-c1e8-414b-b969-d1833d1c870c': {'id': '63ff761f-c1e8-414b-b969-d1833d1c870c',\n",
304
+ " 'user_id': None,\n",
305
+ " 'is_public': True,\n",
306
+ " 'name': 'Confident British Man',\n",
307
+ " 'description': 'This voice is disciplined with a British accent, perfect for a commanding character or narrator',\n",
308
+ " 'created_at': '2024-05-04T18:57:31.399193-07:00',\n",
309
+ " 'embedding': [-0.056990184,-0.06531749,-0.05618861,...]}\n",
310
+ " }\n",
311
+ "}\n",
312
+ "```"
313
  ]
314
  },
315
  {
316
  "cell_type": "code",
317
+ "execution_count": 10,
318
  "id": "8eb7e7d5-7121-4762-b8d1-e5a9539e2b36",
319
  "metadata": {},
320
  "outputs": [],
 
325
  },
326
  {
327
  "cell_type": "code",
328
+ "execution_count": 11,
329
  "id": "52d373be-3a79-412e-8ca2-92bb443fa52d",
330
  "metadata": {},
331
  "outputs": [],
332
  "source": [
333
  "#| export\n",
334
  "#Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.\n",
335
+ "OPENAI_CLIENT_TTS_THREADS = 10 \n",
336
+ "CARTESIAAI_CLIENT_TTS_THREADS = 3\n",
337
+ "\n",
338
+ "DEFAULT_PROVIDER = 'openai'\n",
339
+ "DEFAULT_MODEL = 'tts-1'\n",
340
+ "DEFAULT_VOICE = 'alloy'"
341
+ ]
342
+ },
343
+ {
344
+ "cell_type": "markdown",
345
+ "id": "e6400d8e-49e8-41b8-ad0e-18bc032682b6",
346
+ "metadata": {},
347
+ "source": [
348
+ "# Main Implementation"
349
  ]
350
  },
351
  {
352
  "cell_type": "code",
353
+ "execution_count": 12,
354
+ "id": "b5b29507-92bc-453d-bcc5-6402c17e9a0d",
355
+ "metadata": {},
356
+ "outputs": [],
357
+ "source": [
358
+ "#| export\n",
359
+ "def verify_authorization(profile: gr.OAuthProfile=None) -> str:\n",
360
+ " print('Profile:', profile)\n",
361
+ " if REQUIRE_AUTH == False:\n",
362
+ " return 'WARNING_NO_AUTH_REQUIRED_LOCAL'\n",
363
+ " elif profile is not None and profile.username in [\"matdmiller\"]:\n",
364
+ " return f\"{profile.username}\"\n",
365
+ " else:\n",
366
+ " # print('Unauthorized',profile)\n",
367
+ " raise PermissionError(f'Your huggingface username ({profile}) is not authorized. Must be set in ALLOWED_OAUTH_PROFILE_USERNAMES environment variable.')\n",
368
+ " return None"
369
+ ]
370
+ },
371
+ {
372
+ "cell_type": "code",
373
+ "execution_count": 13,
374
  "id": "24674094-4d47-4e48-b591-55faabcff8df",
375
  "metadata": {},
376
  "outputs": [],
 
411
  },
412
  {
413
  "cell_type": "code",
414
+ "execution_count": 14,
415
  "id": "e6224ae5-3792-42b2-8392-3abd42998a50",
416
  "metadata": {},
417
  "outputs": [],
418
  "source": [
419
  "#| export\n",
420
+ "def concatenate_mp3(mp3_files:list):\n",
421
+ "\n",
 
 
422
  " # Initialize an empty AudioSegment object for concatenation\n",
423
  " combined = AudioSegment.empty()\n",
424
  " \n",
 
454
  },
455
  {
456
  "cell_type": "code",
457
+ "execution_count": 15,
458
  "id": "4691703d-ed0f-4481-8006-b2906289b780",
459
  "metadata": {},
460
  "outputs": [],
 
472
  " return chunk_idx, response.content"
473
  ]
474
  },
475
+ {
476
+ "cell_type": "markdown",
477
+ "id": "6b1a0a8a-0ff6-44fa-b85c-c80c56bd3a24",
478
+ "metadata": {},
479
+ "source": [
480
+ "```python\n",
481
+ "client.generate(\n",
482
+ " *,\n",
483
+ " transcript: str,\n",
484
+ " voice: List[float],\n",
485
+ " model_id: str = '',\n",
486
+ " duration: int = None,\n",
487
+ " chunk_time: float = None,\n",
488
+ " stream: bool = False,\n",
489
+ " websocket: bool = True,\n",
490
+ " output_format: Union[str, cartesia._types.AudioOutputFormat] = 'fp32',\n",
491
+ " data_rtype: str = 'bytes',\n",
492
+ ") -> Union[cartesia._types.AudioOutput, Generator[cartesia._types.AudioOutput, NoneType, NoneType]]\n",
493
+ "\n",
494
+ "list(cartesia._types.AudioOutputFormat)\n",
495
+ "[<AudioOutputFormat.FP32: 'fp32'>,\n",
496
+ " <AudioOutputFormat.PCM: 'pcm'>,\n",
497
+ " <AudioOutputFormat.FP32_16000: 'fp32_16000'>,\n",
498
+ " <AudioOutputFormat.FP32_22050: 'fp32_22050'>,\n",
499
+ " <AudioOutputFormat.FP32_44100: 'fp32_44100'>,\n",
500
+ " <AudioOutputFormat.PCM_16000: 'pcm_16000'>,\n",
501
+ " <AudioOutputFormat.PCM_22050: 'pcm_22050'>,\n",
502
+ " <AudioOutputFormat.PCM_44100: 'pcm_44100'>,\n",
503
+ " <AudioOutputFormat.MULAW_8000: 'mulaw_8000'>]\n",
504
+ "```"
505
+ ]
506
+ },
507
  {
508
  "cell_type": "code",
509
+ "execution_count": 16,
510
+ "id": "3420c868-71cb-4ac6-ac65-6f02bfd841d1",
511
+ "metadata": {},
512
+ "outputs": [],
513
+ "source": [
514
+ "#| export\n",
515
+ "def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon', \n",
516
+ " voice='248be419-c632-4f23-adf1-5324ed7dbf1d', #Hannah\n",
517
+ " websocket=False, output_format='pcm_44100', **kwargs):\n",
518
+ " client = cartesia.tts.CartesiaTTS()\n",
519
+ " \n",
520
+ " @retry(wait=wait_random_exponential(min=1, max=180), stop=stop_after_attempt(6))\n",
521
+ " def _create_speech_with_backoff(**kwargs):\n",
522
+ " return client.generate(**kwargs)\n",
523
+ " \n",
524
+ " response = _create_speech_with_backoff(transcript=input, model_id=model, voice_id=voice, \n",
525
+ " websocket=websocket, output_format=output_format, **kwargs)\n",
526
+ " client.close()\n",
527
+ " return chunk_idx, response[\"audio\"]"
528
+ ]
529
+ },
530
+ {
531
+ "cell_type": "code",
532
+ "execution_count": 17,
533
  "id": "e34bb4aa-698c-4452-8cda-bd02b38f7122",
534
  "metadata": {},
535
  "outputs": [],
536
  "source": [
537
  "#| export\n",
538
+ "def create_speech(input_text, provider, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress(), **kwargs):\n",
539
+ "\n",
540
+ " verify_authorization(profile)\n",
541
  " start = datetime.now()\n",
542
+ "\n",
543
+ " \n",
544
+ " if provider == 'cartesiaai':\n",
545
+ " create_speech_func = create_speech_cartesiaai\n",
546
+ " max_chunk_size = 500\n",
547
+ " chunk_processing_time = 20\n",
548
+ " threads = CARTESIAAI_CLIENT_TTS_THREADS\n",
549
+ " elif provider == 'openai':\n",
550
+ " create_speech_func = create_speech_openai\n",
551
+ " max_chunk_size = 4000\n",
552
+ " chunk_processing_time = 60\n",
553
+ " threads = OPENAI_CLIENT_TTS_THREADS\n",
554
+ " else:\n",
555
+ " raise ValueError(f'Invalid argument provider: {provider}')\n",
556
+ " \n",
557
  " # Split the input text into chunks\n",
558
+ " chunks = split_text(input_text, max_length=max_chunk_size)\n",
559
  "\n",
560
  " # Initialize the progress bar\n",
561
+ " progress(0, desc=f\"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.\")\n",
562
  "\n",
563
  " # Initialize a list to hold the audio data of each chunk\n",
564
  " audio_data = []\n",
565
  "\n",
566
  " # Process each chunk\n",
567
+ " with ThreadPool(processes=threads) as pool:\n",
568
  " results = pool.starmap(\n",
569
+ " partial(create_speech_func, model=model, voice=voice, **kwargs), \n",
570
  " zip(range(len(chunks)),chunks)\n",
571
  " )\n",
572
  " audio_data = [o[1] for o in sorted(results)]\n",
 
587
  },
588
  {
589
  "cell_type": "code",
590
+ "execution_count": 18,
591
+ "id": "ca2c6f8c-62ed-4ac1-9c2f-e3b2bfb47e8d",
592
  "metadata": {},
593
  "outputs": [],
594
  "source": [
595
+ "# create_speech(\"Hi. What's your name?\", provider='openai', model='tts-1', voice='alloy')"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
596
  ]
597
  },
598
  {
599
  "cell_type": "code",
600
+ "execution_count": 19,
601
  "id": "236dd8d3-4364-4731-af93-7dcdec6f18a1",
602
  "metadata": {},
603
  "outputs": [],
 
609
  },
610
  {
611
  "cell_type": "code",
612
+ "execution_count": 20,
613
  "id": "0523a158-ee07-48b3-9350-ee39d4deee7f",
614
  "metadata": {},
615
  "outputs": [],
616
  "source": [
617
  "#| export\n",
618
+ "def get_generation_cost(input_text, tts_model_dropdown, provider):\n",
619
  " text_len = len(input_text)\n",
620
+ " if provider == 'openai':\n",
621
+ " if tts_model_dropdown.endswith('-hd'):\n",
622
+ " cost = text_len/1000 * 0.03\n",
623
+ " else:\n",
624
+ " cost = text_len/1000 * 0.015\n",
625
+ " elif provider == 'cartesiaai':\n",
626
+ " cost = text_len/1000 * 0.065\n",
627
  " else:\n",
628
+ " raise ValueError(f'Invalid argument provider: {provider}')\n",
629
  " return \"${:,.3f}\".format(cost)"
630
  ]
631
  },
632
  {
633
  "cell_type": "code",
634
+ "execution_count": 21,
635
+ "id": "f4d1ba0b-6960-4e22-8dba-7de70370753a",
636
  "metadata": {},
637
  "outputs": [],
638
  "source": [
639
  "#| export\n",
640
+ "def get_model_choices(provider):\n",
641
+ " return sorted([(v,k) for k,v in providers[provider]['models'].items()])"
 
 
 
 
 
642
  ]
643
  },
644
  {
645
  "cell_type": "code",
646
+ "execution_count": 22,
647
+ "id": "efa28cf2-548d-439f-bf2a-21a5edbf9eba",
648
+ "metadata": {},
649
+ "outputs": [],
650
+ "source": [
651
+ "#| export\n",
652
+ "def update_model_choices(provider):\n",
653
+ " choices = get_model_choices(provider)\n",
654
+ " return gr.update(choices=choices,value=choices[0])"
655
+ ]
656
+ },
657
+ {
658
+ "cell_type": "code",
659
+ "execution_count": 23,
660
+ "id": "cdc1dde5-5edd-4dbf-bd11-30eb418c571d",
661
+ "metadata": {},
662
+ "outputs": [],
663
+ "source": [
664
+ "#| export\n",
665
+ "def get_voice_choices(provider, model):\n",
666
+ " return sorted([(v['name'],v['id']) for v in providers[provider]['voices'].values()])"
667
+ ]
668
+ },
669
+ {
670
+ "cell_type": "code",
671
+ "execution_count": 24,
672
+ "id": "035c33dd-c8e6-42b4-91d4-6bc5f1b36df3",
673
  "metadata": {},
674
  "outputs": [],
675
  "source": [
676
  "#| export\n",
677
+ "def update_voice_choices(provider, model):\n",
678
+ " choices = get_voice_choices(provider, model)\n",
679
+ " return gr.update(choices=choices,value=choices[0])"
680
+ ]
681
+ },
682
+ {
683
+ "cell_type": "code",
684
+ "execution_count": 25,
685
+ "id": "e4fb3159-579b-4271-bc96-4cd1e2816eca",
686
+ "metadata": {},
687
+ "outputs": [
688
+ {
689
+ "name": "stderr",
690
+ "output_type": "stream",
691
+ "text": [
692
+ "/Users/mathewmiller/anaconda3/envs/gradio1/lib/python3.11/site-packages/gradio/utils.py:1000: UserWarning: Expected 3 arguments for function <function get_generation_cost at 0x1174f2e80>, received 4.\n",
693
+ " warnings.warn(\n",
694
+ "/Users/mathewmiller/anaconda3/envs/gradio1/lib/python3.11/site-packages/gradio/utils.py:1008: UserWarning: Expected maximum 3 arguments for function <function get_generation_cost at 0x1174f2e80>, received 4.\n",
695
+ " warnings.warn(\n"
696
+ ]
697
+ }
698
+ ],
699
+ "source": [
700
+ "#| export\n",
701
+ "with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600,3600)) as app:\n",
702
+ " gr.Markdown(\"# TTS\")\n",
703
+ " gr.Markdown(\"\"\"Start typing below and then click **Go** to create the speech from your text.\n",
704
+ "For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href=\"https://matdmiller-tts-openai.hf.space/\" target=\"_blank\">Spaces Link HTML</a>\"\"\")\n",
705
  " with gr.Row():\n",
706
  " input_text = gr.Textbox(max_lines=100, label=\"Enter text here\")\n",
707
  " with gr.Row():\n",
708
+ " tts_provider_dropdown = gr.Dropdown(value=DEFAULT_PROVIDER,choices=[(v,k) for k,v in providers.items()], label='Provider')\n",
709
+ " tts_model_dropdown = gr.Dropdown(value=DEFAULT_MODEL,choices=get_model_choices(DEFAULT_PROVIDER), label='Model')\n",
710
+ " tts_voice_dropdown = gr.Dropdown(value=DEFAULT_VOICE,choices=get_voice_choices(DEFAULT_PROVIDER, DEFAULT_MODEL),label='Voice')\n",
711
  " input_text_length = gr.Label(label=\"Number of characters\")\n",
712
  " generation_cost = gr.Label(label=\"Generation cost\")\n",
713
+ " with gr.Row():\n",
714
  " output_audio = gr.Audio()\n",
715
+ "\n",
716
+ " #input_text \n",
717
  " input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)\n",
718
+ " input_text.input(fn=get_generation_cost, \n",
719
+ " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown, tts_provider_dropdown], \n",
720
+ " outputs=tts_voice_dropdown)\n",
721
+ "\n",
722
+ " tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown], \n",
723
+ " outputs=tts_model_dropdown)\n",
724
+ " tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown], \n",
725
+ " outputs=tts_voice_dropdown)\n",
726
+ " \n",
727
+ " tts_model_dropdown.change(fn=get_generation_cost, \n",
728
+ " inputs=[input_text,tts_model_dropdown,tts_provider_dropdown], outputs=generation_cost)\n",
729
+ " \n",
730
  " go_btn = gr.Button(\"Go\")\n",
731
+ " go_btn.click(fn=create_speech, \n",
732
+ " inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown], \n",
733
+ " outputs=[output_audio])\n",
734
+ " \n",
735
  " clear_btn = gr.Button('Clear')\n",
736
  " clear_btn.click(fn=lambda: '', outputs=input_text)\n",
737
  "\n",
738
+ " if REQUIRE_AUTH:\n",
739
+ " gr.LoginButton()\n",
740
+ " m = gr.Markdown('')\n",
741
+ " app.load(verify_authorization, None, m)\n",
742
  " "
743
  ]
744
  },
745
  {
746
  "cell_type": "code",
747
+ "execution_count": 26,
748
  "id": "a00648a1-891b-470b-9959-f5d502055713",
749
  "metadata": {},
750
  "outputs": [],
 
758
  },
759
  {
760
  "cell_type": "code",
761
+ "execution_count": 27,
762
  "id": "4b534fe7-4337-423e-846a-1bdb7cccc4ea",
763
  "metadata": {},
764
+ "outputs": [
765
+ {
766
+ "name": "stdout",
767
+ "output_type": "stream",
768
+ "text": [
769
+ "Running on local URL: http://127.0.0.1:7860\n",
770
+ "\n",
771
+ "To create a public link, set `share=True` in `launch()`.\n"
772
+ ]
773
+ },
774
+ {
775
+ "data": {
776
+ "text/html": [
777
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
778
+ ],
779
+ "text/plain": [
780
+ "<IPython.core.display.HTML object>"
781
+ ]
782
+ },
783
+ "metadata": {},
784
+ "output_type": "display_data"
785
+ },
786
+ {
787
+ "data": {
788
+ "text/plain": []
789
+ },
790
+ "execution_count": 27,
791
+ "metadata": {},
792
+ "output_type": "execute_result"
793
+ }
794
+ ],
795
  "source": [
796
  "#| hide\n",
797
  "#Notebook launch\n",
 
815
  },
816
  {
817
  "cell_type": "code",
818
+ "execution_count": 28,
819
  "id": "28e8d888-e790-46fa-bbac-4511b9ab796c",
820
  "metadata": {},
821
+ "outputs": [
822
+ {
823
+ "name": "stdout",
824
+ "output_type": "stream",
825
+ "text": [
826
+ "Closing server running on port: 7860\n"
827
+ ]
828
+ }
829
+ ],
830
  "source": [
831
  "#| hide\n",
832
  "app.close()"
 
834
  },
835
  {
836
  "cell_type": "code",
837
+ "execution_count": 2,
838
  "id": "afbc9699-4d16-4060-88f4-cd1251754cbd",
839
  "metadata": {},
840
+ "outputs": [
841
+ {
842
+ "ename": "NameError",
843
+ "evalue": "name 'gr' is not defined",
844
+ "output_type": "error",
845
+ "traceback": [
846
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
847
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
848
+ "Cell \u001b[0;32mIn[2], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m#| hide\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m \u001b[43mgr\u001b[49m\u001b[38;5;241m.\u001b[39mclose_all()\n",
849
+ "\u001b[0;31mNameError\u001b[0m: name 'gr' is not defined"
850
+ ]
851
+ }
852
+ ],
853
  "source": [
854
  "#| hide\n",
855
  "gr.close_all()"
 
857
  },
858
  {
859
  "cell_type": "code",
860
+ "execution_count": 30,
861
  "id": "0420310d-930b-4904-8bd4-3458ad8bdbd3",
862
  "metadata": {},
863
  "outputs": [],
app.py CHANGED
@@ -1,35 +1,54 @@
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
- __all__ = ['secret_import_failed', 'TEMP', 'TEMP_DIR', 'tts_voices', 'clean_text_prompt', 'OPENAI_CLIENT_TTS_THREADS',
5
- 'launch_kwargs', 'queue_kwargs', 'split_text', 'concatenate_mp3', 'create_speech_openai', 'create_speech2',
6
- 'create_speech', 'get_input_text_len', 'get_generation_cost', 'authorized']
7
-
8
- # %% app.ipynb 1
9
- #tts_openai_secrets.py content:
10
- #import os
11
- #os.environ['OPENAI_API_KEY'] = 'sk-XXXXXXXXXXXXXXXXXXXXXX'
12
  import os
13
  secret_import_failed = False
14
  try:
 
15
  _ = os.environ['OPENAI_API_KEY']
16
  print('OPENAI_API_KEY environment variable was found.')
17
  except:
18
  print('OPENAI_API_KEY environment variable was not found.')
19
  secret_import_failed = True
20
  try:
21
- GRADIO_PASSWORD = os.environ['GRADIO_PASSWORD']
22
- print('GRADIO_PASSWORD environment variable was found.')
 
 
 
 
 
 
23
  except:
24
- print('GRADIO_PASSWORD environment variable was not found.')
25
  secret_import_failed = True
26
 
27
  if secret_import_failed == True:
28
  import tts_openai_secrets
29
- GRADIO_PASSWORD = os.environ['GRADIO_PASSWORD']
 
 
30
  print('import tts_openai_secrets succeeded')
31
 
32
- # %% app.ipynb 3
 
 
 
 
 
 
 
 
 
 
 
33
  import gradio as gr
34
  import openai
35
  from pydub import AudioSegment
@@ -45,31 +64,70 @@ from tenacity import (
45
  stop_after_attempt,
46
  wait_random_exponential,
47
  ) # for exponential backoff
 
 
 
48
 
49
- # %% app.ipynb 4
50
  TEMP = os.environ.get('GRADIO_TEMP_DIR','/tmp/')
51
  TEMP_DIR = Path(TEMP)
52
  print('TEMP Dir:', TEMP_DIR)
53
 
54
- # %% app.ipynb 5
 
 
 
 
55
  try:
56
- tts_models = [o.id for o in openai.models.list().data if 'tts' in o.id]
57
- print('successfully got tts model list:', tts_models)
58
- except:
59
- tts_models = ['tts-1']
 
 
 
 
 
60
 
61
- # %% app.ipynb 6
62
- tts_voices = ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']
 
 
 
 
 
 
 
 
 
 
63
 
64
- # %% app.ipynb 7
65
  clean_text_prompt = """Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early."""
66
 
67
 
68
- # %% app.ipynb 8
69
  #Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.
70
  OPENAI_CLIENT_TTS_THREADS = 10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
- # %% app.ipynb 9
73
  def split_text(input_text, max_length=4000, lookback=1000):
74
  # If the text is shorter than the max_length, return it as is
75
  if len(input_text) <= max_length:
@@ -102,11 +160,9 @@ def split_text(input_text, max_length=4000, lookback=1000):
102
 
103
  return chunks
104
 
105
- # %% app.ipynb 10
106
- def concatenate_mp3(mp3_files):
107
- # if len(mp3_files) == 1:
108
- # return mp3_files[0]
109
- # else:
110
  # Initialize an empty AudioSegment object for concatenation
111
  combined = AudioSegment.empty()
112
 
@@ -139,7 +195,7 @@ def concatenate_mp3(mp3_files):
139
  print('Saving mp3 file to temp directory: ', filepath)
140
  return str(filepath)
141
 
142
- # %% app.ipynb 11
143
  def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):
144
  client = openai.OpenAI()
145
 
@@ -151,24 +207,54 @@ def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1
151
  client.close()
152
  return chunk_idx, response.content
153
 
154
- # %% app.ipynb 12
155
- def create_speech2(input_text, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress(), **kwargs):
156
- print('cs2-profile:',profile)
157
- assert authorized(profile) is not None,'Unauthorized M'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
  start = datetime.now()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  # Split the input text into chunks
160
- chunks = split_text(input_text)
161
 
162
  # Initialize the progress bar
163
- progress(0, desc=f"Started processing {len(chunks)} text chunks using {OPENAI_CLIENT_TTS_THREADS} threads. ETA is ~{ceil(len(chunks)/OPENAI_CLIENT_TTS_THREADS)} min.")
164
 
165
  # Initialize a list to hold the audio data of each chunk
166
  audio_data = []
167
 
168
  # Process each chunk
169
- with ThreadPool(processes=OPENAI_CLIENT_TTS_THREADS) as pool:
170
  results = pool.starmap(
171
- partial(create_speech_openai, model=model, voice=voice, **kwargs),
172
  zip(range(len(chunks)),chunks)
173
  )
174
  audio_data = [o[1] for o in sorted(results)]
@@ -187,105 +273,93 @@ def create_speech2(input_text, model='tts-1', voice='alloy', profile: gr.OAuthPr
187
  return combined_audio
188
 
189
 
190
- # %% app.ipynb 13
191
- def create_speech(input_text, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress()):
192
- assert authorized(profile) is not None,'Unauthorized M'
193
- # Split the input text into chunks
194
- chunks = split_text(input_text)
195
-
196
- # Initialize the progress bar
197
- progress(0, desc="Starting TTS processing...")
198
-
199
- # Initialize a list to hold the audio data of each chunk
200
- audio_data = []
201
-
202
- # Create a client instance for OpenAI
203
- client = openai.OpenAI()
204
-
205
- # Calculate the progress increment for each chunk
206
- progress_increment = 1.0 / len(chunks)
207
-
208
- # Process each chunk
209
- for i, chunk in enumerate(chunks):
210
- response = client.audio.speech.create(
211
- model=model,
212
- voice=voice,
213
- input=chunk,
214
- speed=1.0
215
- )
216
- # Append the audio content of the response to the list
217
- audio_data.append(response.content)
218
-
219
- # Update the progress bar
220
- progress((i + 1) * progress_increment, desc=f"Processing chunk {i + 1} of {len(chunks)}")
221
-
222
- # Close the client connection
223
- client.close()
224
-
225
- # Concatenate the audio data from all chunks
226
- combined_audio = concatenate_mp3(audio_data)
227
-
228
- # Final update to the progress bar
229
- progress(1, desc="Processing completed")
230
-
231
- return combined_audio
232
-
233
-
234
- # %% app.ipynb 14
235
  def get_input_text_len(input_text):
236
  return len(input_text)
237
 
238
- # %% app.ipynb 15
239
- def get_generation_cost(input_text, tts_model_dropdown):
240
  text_len = len(input_text)
241
- if tts_model_dropdown.endswith('-hd'):
242
- cost = text_len/1000 * 0.03
 
 
 
 
 
243
  else:
244
- cost = text_len/1000 * 0.015
245
  return "${:,.3f}".format(cost)
246
 
247
- # %% app.ipynb 16
248
- def authorized(profile: gr.OAuthProfile=None) -> str:
249
- print('Profile:', profile)
250
- if profile is not None and profile.username in ["matdmiller"]:
251
- return f"{profile.username}"
252
- else:
253
- print('Unauthorized',profile)
254
- return None
255
-
256
- # %% app.ipynb 17
257
- with gr.Blocks(title='OpenAI TTS', head='OpenAI TTS', delete_cache=(3600,3600)) as app:
258
- gr.Markdown("# OpenAI TTS")
259
- gr.Markdown("""Start typing below and then click **Go** to create the speech from your text. The current limit is 4,000 characters.
260
- For requests longer than 4,000 chars they will be broken into chunks of 4,000 or less chars automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/)""")
 
 
 
 
 
 
 
 
 
261
  with gr.Row():
262
  input_text = gr.Textbox(max_lines=100, label="Enter text here")
263
  with gr.Row():
264
- tts_model_dropdown = gr.Dropdown(value='tts-1',choices=tts_models, label='Model')
265
- tts_voice_dropdown = gr.Dropdown(value='alloy',choices=tts_voices,label='Voice')
 
266
  input_text_length = gr.Label(label="Number of characters")
267
  generation_cost = gr.Label(label="Generation cost")
 
268
  output_audio = gr.Audio()
 
 
269
  input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)
270
- input_text.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)
271
- tts_model_dropdown.input(fn=get_generation_cost, inputs=[input_text,tts_model_dropdown], outputs=generation_cost)
 
 
 
 
 
 
 
 
 
 
272
  go_btn = gr.Button("Go")
273
- go_btn.click(fn=create_speech2, inputs=[input_text, tts_model_dropdown, tts_voice_dropdown], outputs=[output_audio])
 
 
 
274
  clear_btn = gr.Button('Clear')
275
  clear_btn.click(fn=lambda: '', outputs=input_text)
276
 
277
- gr.LoginButton()
278
- m = gr.Markdown('')
279
- app.load(authorized, None, m)
 
280
 
281
 
282
- # %% app.ipynb 18
283
  # launch_kwargs = {'auth':('username',GRADIO_PASSWORD),
284
  # 'auth_message':'Please log in to Mat\'s TTS App with username: username and password.'}
285
  launch_kwargs = {}
286
  queue_kwargs = {'default_concurrency_limit':10}
287
 
288
- # %% app.ipynb 20
289
  #.py launch
290
  if __name__ == "__main__":
291
  app.queue(**queue_kwargs)
 
1
  # AUTOGENERATED! DO NOT EDIT! File to edit: app.ipynb.
2
 
3
  # %% auto 0
4
+ __all__ = ['secret_import_failed', 'TEMP', 'TEMP_DIR', 'providers', 'clean_text_prompt', 'OPENAI_CLIENT_TTS_THREADS',
5
+ 'CARTESIAAI_CLIENT_TTS_THREADS', 'DEFAULT_PROVIDER', 'DEFAULT_MODEL', 'DEFAULT_VOICE', 'launch_kwargs',
6
+ 'queue_kwargs', 'verify_authorization', 'split_text', 'concatenate_mp3', 'create_speech_openai',
7
+ 'create_speech_cartesiaai', 'create_speech', 'get_input_text_len', 'get_generation_cost',
8
+ 'get_model_choices', 'update_model_choices', 'get_voice_choices', 'update_voice_choices']
9
+
10
+ # %% app.ipynb 4
 
11
  import os
12
  secret_import_failed = False
13
  try:
14
+ # don't need the openai api key in a variable
15
  _ = os.environ['OPENAI_API_KEY']
16
  print('OPENAI_API_KEY environment variable was found.')
17
  except:
18
  print('OPENAI_API_KEY environment variable was not found.')
19
  secret_import_failed = True
20
  try:
21
+ CARTESIA_API_KEY = os.environ['CARTESIA_API_KEY']
22
+ print('CARTESIA_API_KEY environment variable was found.')
23
+ except:
24
+ print('CARTESIA_API_KEY environment variable was not found.')
25
+ secret_import_failed = True
26
+ try:
27
+ ALLOWED_OAUTH_PROFILE_USERNAMES = os.environ['ALLOWED_OAUTH_PROFILE_USERNAMES']
28
+ print('ALLOWED_OAUTH_PROFILE_USERNAMES environment variable was found.')
29
  except:
30
+ print('ALLOWED_OAUTH_PROFILE_USERNAMES environment variable was not found.')
31
  secret_import_failed = True
32
 
33
  if secret_import_failed == True:
34
  import tts_openai_secrets
35
+ _ = os.environ['OPENAI_API_KEY']
36
+ CARTESIA_API_KEY = os.environ['CARTESIA_API_KEY']
37
+ ALLOWED_OAUTH_PROFILE_USERNAMES = os.environ['ALLOWED_OAUTH_PROFILE_USERNAMES']
38
  print('import tts_openai_secrets succeeded')
39
 
40
+ # %% app.ipynb 5
41
+ # If REQUIRE_AUTH environemnt variable is set to 'false' (from secrets) and HF_SPACE != 1 then we
42
+ # are running locally and don't require authentication and authorization, otherwise we do.
43
+ # We are using paid API's so don't want anybody/everybody to be able to use our paid services.
44
+ if os.environ.get("REQUIRE_AUTH",'true') == 'false' and os.environ.get('HF_SPACE',0) != 1:
45
+ REQUIRE_AUTH = False
46
+ else:
47
+ REQUIRE_AUTH = True
48
+ print('REQUIRE_AUTH:',REQUIRE_AUTH)
49
+
50
+ # %% app.ipynb 8
51
+ import os
52
  import gradio as gr
53
  import openai
54
  from pydub import AudioSegment
 
64
  stop_after_attempt,
65
  wait_random_exponential,
66
  ) # for exponential backoff
67
+ import traceback
68
+ # from cartesia.tts import CartesiaTTS
69
+ import cartesia
70
 
71
+ # %% app.ipynb 11
72
  TEMP = os.environ.get('GRADIO_TEMP_DIR','/tmp/')
73
  TEMP_DIR = Path(TEMP)
74
  print('TEMP Dir:', TEMP_DIR)
75
 
76
+ # %% app.ipynb 12
77
+ providers = dict()
78
+
79
+ # %% app.ipynb 13
80
+ # Add OpenAI as a provider
81
  try:
82
+ providers['openai'] = {
83
+ 'name': 'Open AI',
84
+ 'models': {o.id: o.id for o in openai.models.list().data if 'tts' in o.id},
85
+ 'voices': {o:{'id':o,'name':o.title()} for o in ['alloy', 'echo', 'fable', 'onyx', 'nova', 'shimmer']},
86
+ }
87
+ print('Successfully added OpenAI as Provider')
88
+ except Exception as e:
89
+ print(f"""Error: Failed to add OpenAI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
90
+ # providers
91
 
92
+ # %% app.ipynb 14
93
+ # Add Cartesia AI as a provider
94
+ try:
95
+ providers['cartesiaai'] = {
96
+ 'name': 'Cartesia AI',
97
+ 'models': {'upbeat-moon': 'Sonic Turbo English'},
98
+ 'voices': {v['id']:v for k,v in cartesia.tts.CartesiaTTS().get_voices().items()},
99
+ }
100
+ print('Successfully added Cartesia AI as Provider')
101
+ except Exception as e:
102
+ print(f"""Error: Failed to add Cartesia AI as a provider.\nException: {repr(e)}\nTRACEBACK:\n""",traceback.format_exc())
103
+ # providers
104
 
105
+ # %% app.ipynb 16
106
  clean_text_prompt = """Your job is to clean up text that is going to be fed into a text to speech (TTS) model. You must remove parts of the text that would not normally be spoken such as reference marks `[1]`, spurious citations such as `(Reddy et al., 2021; Wu et al., 2022; Chang et al., 2022; Kondratyuk et al., 2023)` and any other part of the text that is not normally spoken. Please also clean up sections and headers so they are on new lines with proper numbering. You must also clean up any math formulas that are salvageable from being copied from a scientific paper. If they are garbled and do not make sense then remove them. You must carefully perform the text cleanup so it is translated into speech that is easy to listen to however you must not modify the text otherwise. It is critical that you repeat all of the text without modifications except for the cleanup activities you've been instructed to do. Also you must clean all of the text you are given, you may not omit any of it or stop the cleanup task early."""
107
 
108
 
109
+ # %% app.ipynb 17
110
  #Number of threads created PER USER REQUEST. This throttels the # of API requests PER USER request. This is in ADDITION to the Gradio threads.
111
  OPENAI_CLIENT_TTS_THREADS = 10
112
+ CARTESIAAI_CLIENT_TTS_THREADS = 3
113
+
114
+ DEFAULT_PROVIDER = 'openai'
115
+ DEFAULT_MODEL = 'tts-1'
116
+ DEFAULT_VOICE = 'alloy'
117
+
118
+ # %% app.ipynb 19
119
def verify_authorization(profile: gr.OAuthProfile=None) -> str:
    """Return the authorized username, or raise PermissionError.

    When running locally (REQUIRE_AUTH is False) no login is required and a
    sentinel string is returned instead of a username.

    Raises:
        PermissionError: if no profile is supplied or the profile's username
            is not in the allow list.
    """
    print('Profile:', profile)
    if REQUIRE_AUTH == False:
        return 'WARNING_NO_AUTH_REQUIRED_LOCAL'
    # BUG FIX: authorization previously checked a hard-coded username list even
    # though ALLOWED_OAUTH_PROFILE_USERNAMES is loaded from the environment and
    # the error message below already instructs users to set it.
    # Assumes the env var is a comma-separated list of usernames -- TODO confirm.
    allowed_usernames = {u.strip() for u in ALLOWED_OAUTH_PROFILE_USERNAMES.split(',') if u.strip()}
    if profile is not None and profile.username in allowed_usernames:
        return f"{profile.username}"
    # Unreachable `return None` after the raise was removed.
    raise PermissionError(f'Your huggingface username ({profile}) is not authorized. Must be set in ALLOWED_OAUTH_PROFILE_USERNAMES environment variable.')
129
 
130
+ # %% app.ipynb 20
131
  def split_text(input_text, max_length=4000, lookback=1000):
132
  # If the text is shorter than the max_length, return it as is
133
  if len(input_text) <= max_length:
 
160
 
161
  return chunks
162
 
163
+ # %% app.ipynb 21
164
+ def concatenate_mp3(mp3_files:list):
165
+
 
 
166
  # Initialize an empty AudioSegment object for concatenation
167
  combined = AudioSegment.empty()
168
 
 
195
  print('Saving mp3 file to temp directory: ', filepath)
196
  return str(filepath)
197
 
198
+ # %% app.ipynb 22
199
  def create_speech_openai(chunk_idx, input, model='tts-1', voice='alloy', speed=1.0, **kwargs):
200
  client = openai.OpenAI()
201
 
 
207
  client.close()
208
  return chunk_idx, response.content
209
 
210
+ # %% app.ipynb 24
211
+ def create_speech_cartesiaai(chunk_idx, input, model='upbeat-moon',
212
+ voice='248be419-c632-4f23-adf1-5324ed7dbf1d', #Hannah
213
+ websocket=False, output_format='pcm_44100', **kwargs):
214
+ client = cartesia.tts.CartesiaTTS()
215
+
216
+ @retry(wait=wait_random_exponential(min=1, max=180), stop=stop_after_attempt(6))
217
+ def _create_speech_with_backoff(**kwargs):
218
+ return client.generate(**kwargs)
219
+
220
+ response = _create_speech_with_backoff(transcript=input, model_id=model, voice_id=voice,
221
+ websocket=websocket, output_format=output_format, **kwargs)
222
+ client.close()
223
+ return chunk_idx, response["audio"]
224
+
225
+ # %% app.ipynb 25
226
+ def create_speech(input_text, provider, model='tts-1', voice='alloy', profile: gr.OAuthProfile|None=None, progress=gr.Progress(), **kwargs):
227
+
228
+ verify_authorization(profile)
229
  start = datetime.now()
230
+
231
+
232
+ if provider == 'cartesiaai':
233
+ create_speech_func = create_speech_cartesiaai
234
+ max_chunk_size = 500
235
+ chunk_processing_time = 20
236
+ threads = CARTESIAAI_CLIENT_TTS_THREADS
237
+ elif provider == 'openai':
238
+ create_speech_func = create_speech_openai
239
+ max_chunk_size = 4000
240
+ chunk_processing_time = 60
241
+ threads = OPENAI_CLIENT_TTS_THREADS
242
+ else:
243
+ raise ValueError(f'Invalid argument provider: {provider}')
244
+
245
  # Split the input text into chunks
246
+ chunks = split_text(input_text, max_length=max_chunk_size)
247
 
248
  # Initialize the progress bar
249
+ progress(0, desc=f"Started processing {len(chunks)} text chunks using {threads} threads. ETA is ~{ceil(len(chunks)/threads)*chunk_processing_time/60.} min.")
250
 
251
  # Initialize a list to hold the audio data of each chunk
252
  audio_data = []
253
 
254
  # Process each chunk
255
+ with ThreadPool(processes=threads) as pool:
256
  results = pool.starmap(
257
+ partial(create_speech_func, model=model, voice=voice, **kwargs),
258
  zip(range(len(chunks)),chunks)
259
  )
260
  audio_data = [o[1] for o in sorted(results)]
 
273
  return combined_audio
274
 
275
 
276
+ # %% app.ipynb 27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
277
  def get_input_text_len(input_text):
278
  return len(input_text)
279
 
280
+ # %% app.ipynb 28
281
def get_generation_cost(input_text, tts_model_dropdown, provider):
    """Return the estimated synthesis cost of `input_text` as a dollar string.

    Pricing is per 1,000 characters: OpenAI $0.015 ($0.03 for '-hd' models),
    Cartesia AI $0.065.

    Raises:
        ValueError: if `provider` is not a supported provider key.
    """
    n_chars = len(input_text)
    if provider == 'openai':
        rate = 0.03 if tts_model_dropdown.endswith('-hd') else 0.015
    elif provider == 'cartesiaai':
        rate = 0.065
    else:
        raise ValueError(f'Invalid argument provider: {provider}')
    return "${:,.3f}".format(n_chars / 1000 * rate)
293
 
294
+ # %% app.ipynb 29
295
def get_model_choices(provider):
    """Return (display_name, model_id) dropdown choices for `provider`, sorted by display name."""
    models = providers[provider]['models']
    return sorted((label, model_id) for model_id, label in models.items())
297
+
298
+ # %% app.ipynb 30
299
def update_model_choices(provider):
    """Refresh the model dropdown after the provider selection changes.

    Selects the new provider's first model by default.
    """
    choices = get_model_choices(provider)
    # BUG FIX: was `value=choices[0]`, which set the dropdown value to a whole
    # (label, model_id) tuple; the value must be the underlying model id.
    return gr.update(choices=choices, value=choices[0][1])
302
+
303
+ # %% app.ipynb 31
304
def get_voice_choices(provider, model):
    """Return (voice_name, voice_id) dropdown choices for `provider`, sorted by name.

    `model` is accepted for interface symmetry; voices are currently
    provider-wide rather than model-specific.
    """
    voices = providers[provider]['voices'].values()
    return sorted((voice['name'], voice['id']) for voice in voices)
306
+
307
+ # %% app.ipynb 32
308
def update_voice_choices(provider, model):
    """Refresh the voice dropdown after the provider (or model) changes.

    Selects the first voice of the new provider by default.
    """
    choices = get_voice_choices(provider, model)
    # BUG FIX: was `value=choices[0]`, which set the dropdown value to a whole
    # (name, voice_id) tuple; the value must be the underlying voice id.
    return gr.update(choices=choices, value=choices[0][1])
311
+
312
+ # %% app.ipynb 33
313
# Gradio UI: provider/model/voice selection, live character count and cost
# estimate, and a Go button that synthesizes speech from the entered text.
with gr.Blocks(title='TTS', head='TTS', delete_cache=(3600, 3600)) as app:
    gr.Markdown("# TTS")
    gr.Markdown("""Start typing below and then click **Go** to create the speech from your text.
For requests longer than allowed by the API they will be broken into chunks automatically. [Spaces Link](https://matdmiller-tts-openai.hf.space/) | <a href="https://matdmiller-tts-openai.hf.space/" target="_blank">Spaces Link HTML</a>""")
    with gr.Row():
        input_text = gr.Textbox(max_lines=100, label="Enter text here")
    with gr.Row():
        # BUG FIX: provider choices were built as (provider_dict, key); the
        # label must be the human-readable provider name, not the config dict.
        tts_provider_dropdown = gr.Dropdown(value=DEFAULT_PROVIDER,
                                            choices=[(v['name'], k) for k, v in providers.items()],
                                            label='Provider')
        tts_model_dropdown = gr.Dropdown(value=DEFAULT_MODEL,
                                         choices=get_model_choices(DEFAULT_PROVIDER), label='Model')
        tts_voice_dropdown = gr.Dropdown(value=DEFAULT_VOICE,
                                         choices=get_voice_choices(DEFAULT_PROVIDER, DEFAULT_MODEL),
                                         label='Voice')
        input_text_length = gr.Label(label="Number of characters")
        generation_cost = gr.Label(label="Generation cost")
    with gr.Row():
        output_audio = gr.Audio()

    # Live stats while the user types.
    input_text.input(fn=get_input_text_len, inputs=input_text, outputs=input_text_length)
    # BUG FIX: get_generation_cost takes exactly 3 inputs and its result is the
    # cost estimate -- previously a duplicated 4th input was passed (Gradio
    # warned "Expected 3 arguments ... received 4") and the output was wired
    # to the voice dropdown instead of the cost label.
    input_text.input(fn=get_generation_cost,
                     inputs=[input_text, tts_model_dropdown, tts_provider_dropdown],
                     outputs=generation_cost)

    # Switching provider refreshes the dependent model and voice dropdowns.
    tts_provider_dropdown.change(fn=update_model_choices, inputs=[tts_provider_dropdown],
                                 outputs=tts_model_dropdown)
    tts_provider_dropdown.change(fn=update_voice_choices, inputs=[tts_provider_dropdown, tts_model_dropdown],
                                 outputs=tts_voice_dropdown)

    # Recompute the cost when the model changes (fires for programmatic
    # updates too, i.e. after a provider switch).
    tts_model_dropdown.change(fn=get_generation_cost,
                              inputs=[input_text, tts_model_dropdown, tts_provider_dropdown],
                              outputs=generation_cost)

    go_btn = gr.Button("Go")
    go_btn.click(fn=create_speech,
                 inputs=[input_text, tts_provider_dropdown, tts_model_dropdown, tts_voice_dropdown],
                 outputs=[output_audio])

    clear_btn = gr.Button('Clear')
    clear_btn.click(fn=lambda: '', outputs=input_text)

    if REQUIRE_AUTH:
        gr.LoginButton()
        m = gr.Markdown('')
        app.load(verify_authorization, None, m)
354
 
355
 
356
+ # %% app.ipynb 34
357
  # launch_kwargs = {'auth':('username',GRADIO_PASSWORD),
358
  # 'auth_message':'Please log in to Mat\'s TTS App with username: username and password.'}
359
  launch_kwargs = {}
360
  queue_kwargs = {'default_concurrency_limit':10}
361
 
362
+ # %% app.ipynb 36
363
  #.py launch
364
  if __name__ == "__main__":
365
  app.queue(**queue_kwargs)
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
- openai==1.25.1
2
- gradio==4.28.3
 
3
  pydub==0.25.1
4
- tenacity==8.2.3
 
1
+ openai==1.34.0
2
+ cartesia==0.1.1
3
+ gradio==4.36.1
4
  pydub==0.25.1
5
+ tenacity==8.3.0