ggoknar committed on
Commit 4d98613
1 Parent(s): 145f28e

remove unneeded test file

Files changed (1)
  1. mistral.ipynb +0 -578
mistral.ipynb DELETED
@@ -1,578 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": 1,
6
- "id": "f8bdd950-1b95-4088-890a-94417292f6e1",
7
- "metadata": {},
8
- "outputs": [
9
- {
10
- "name": "stderr",
11
- "output_type": "stream",
12
- "text": [
13
- "[nltk_data] Downloading package punkt to /home/gorkem/nltk_data...\n",
14
- "[nltk_data] Package punkt is already up-to-date!\n",
15
- "2023-10-13 00:33:39.399490: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
16
- ]
17
- },
18
- {
19
- "name": "stdout",
20
- "output_type": "stream",
21
- "text": [
22
- "Downloading if not downloaded Coqui XTTS V1\n",
23
- " > tts_models/multilingual/multi-dataset/xtts_v1 is already downloaded.\n",
24
- " > Using model: xtts\n",
25
- "XTTS downloaded\n",
26
- "Loading XTTS\n",
27
- "[2023-10-13 00:34:12,573] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed info: version=0.8.3+f1e4fb0b, git-hash=f1e4fb0b, git-branch=HEAD\n",
28
- "[2023-10-13 00:34:12,587] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter replace_method is deprecated. This parameter is no longer needed, please remove from your call to DeepSpeed-inference\n",
29
- "[2023-10-13 00:34:12,589] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter mp_size is deprecated use tensor_parallel.tp_size instead\n",
30
- "[2023-10-13 00:34:12,590] [INFO] [logging.py:93:log_dist] [Rank -1] quantize_bits = 8 mlp_extra_grouping = False, quantize_groups = 1\n",
31
- "[2023-10-13 00:34:12,854] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed-Inference config: {'layer_id': 0, 'hidden_size': 1024, 'intermediate_size': 4096, 'heads': 16, 'num_hidden_layers': -1, 'fp16': False, 'pre_layer_norm': True, 'local_rank': -1, 'stochastic_mode': False, 'epsilon': 1e-05, 'mp_size': 1, 'q_int8': False, 'scale_attention': True, 'triangular_masking': True, 'local_attention': False, 'window_size': 1, 'rotary_dim': -1, 'rotate_half': False, 'rotate_every_two': True, 'return_tuple': True, 'mlp_after_attn': True, 'mlp_act_func_type': <ActivationFuncType.GELU: 1>, 'specialized_mode': False, 'training_mp_size': 1, 'bigscience_bloom': False, 'max_out_tokens': 1024, 'scale_attn_by_inverse_layer_idx': False, 'enable_qkv_quantization': False, 'use_mup': False, 'return_single_tuple': False}\n",
32
- "Done loading TTS\n",
33
- "Loaded as API: https://sanchit-gandhi-whisper-jax.hf.space/ ✔\n"
34
- ]
35
- }
36
- ],
37
- "source": [
38
- "from __future__ import annotations\n",
39
- "\n",
40
- "import os\n",
41
- "# By using XTTS you agree to CPML license https://coqui.ai/cpml\n",
42
- "os.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n",
43
- "\n",
44
- "import gradio as gr\n",
45
- "import numpy as np\n",
46
- "import torch\n",
47
- "import nltk # we'll use this to split into sentences\n",
48
- "nltk.download('punkt')\n",
49
- "import uuid\n",
50
- "\n",
51
- "import librosa\n",
52
- "import torchaudio\n",
53
- "from TTS.api import TTS\n",
54
- "from TTS.tts.configs.xtts_config import XttsConfig\n",
55
- "from TTS.tts.models.xtts import Xtts\n",
56
- "from TTS.utils.generic_utils import get_user_data_dir\n",
57
- "\n",
58
- "# This will trigger downloading model\n",
59
- "print(\"Downloading if not downloaded Coqui XTTS V1\")\n",
60
- "tts = TTS(\"tts_models/multilingual/multi-dataset/xtts_v1\")\n",
61
- "del tts\n",
62
- "print(\"XTTS downloaded\")\n",
63
- "\n",
64
- "print(\"Loading XTTS\")\n",
65
- "#Below will use model directly for inference\n",
66
- "model_path = os.path.join(get_user_data_dir(\"tts\"), \"tts_models--multilingual--multi-dataset--xtts_v1\")\n",
67
- "config = XttsConfig()\n",
68
- "config.load_json(os.path.join(model_path, \"config.json\"))\n",
69
- "model = Xtts.init_from_config(config)\n",
70
- "model.load_checkpoint(\n",
71
- " config,\n",
72
- " checkpoint_path=os.path.join(model_path, \"model.pth\"),\n",
73
- " vocab_path=os.path.join(model_path, \"vocab.json\"),\n",
74
- " eval=True,\n",
75
- " use_deepspeed=True\n",
76
- ")\n",
77
- "model.cuda()\n",
78
- "print(\"Done loading TTS\")\n",
79
- "\n",
80
- "\n",
81
- "title = \"Voice chat with Mistral 7B Instruct\"\n",
82
- "\n",
83
- "DESCRIPTION = \"\"\"# Voice chat with Mistral 7B Instruct\"\"\"\n",
84
- "css = \"\"\".toast-wrap { display: none !important } \"\"\"\n",
85
- "\n",
86
- "from huggingface_hub import HfApi\n",
87
- "HF_TOKEN = os.environ.get(\"HF_TOKEN\")\n",
88
- "# will use api to restart space on a unrecoverable error\n",
89
- "api = HfApi(token=HF_TOKEN)\n",
90
- "\n",
91
- "repo_id = \"ylacombe/voice-chat-with-lama\"\n",
92
- "\n",
93
- "system_message = \"\\nYou are a helpful, respectful and honest assistant. Your answers are short, ideally a few words long, if it is possible. Always answer as helpfully as possible, while being safe.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\"\n",
94
- "temperature = 0.9\n",
95
- "top_p = 0.6\n",
96
- "repetition_penalty = 1.2\n",
97
- "\n",
98
- "\n",
99
- "import gradio as gr\n",
100
- "import os\n",
101
- "import time\n",
102
- "\n",
103
- "import gradio as gr\n",
104
- "from transformers import pipeline\n",
105
- "import numpy as np\n",
106
- "\n",
107
- "from gradio_client import Client\n",
108
- "from huggingface_hub import InferenceClient\n",
109
- "\n",
110
- "\n",
111
- "# This client is down\n",
112
- "#whisper_client = Client(\"https://sanchit-gandhi-whisper-large-v2.hf.space/\")\n",
113
- "# Replacement whisper client, it may be time limited\n",
114
- "whisper_client = Client(\"https://sanchit-gandhi-whisper-jax.hf.space\")\n",
115
- "text_client = InferenceClient(\n",
116
- " \"mistralai/Mistral-7B-Instruct-v0.1\"\n",
117
- ")\n"
118
- ]
119
- },
120
- {
121
- "cell_type": "code",
122
- "execution_count": null,
123
- "id": "d8687cd2-e989-4db9-b16a-04ad9460e6f1",
124
- "metadata": {},
125
- "outputs": [
126
- {
127
- "name": "stdout",
128
- "output_type": "stream",
129
- "text": [
130
- "Running on local URL: http://127.0.0.1:7861\n",
131
- "\n",
132
- "To create a public link, set `share=True` in `launch()`.\n"
133
- ]
134
- },
135
- {
136
- "data": {
137
- "text/html": [
138
- "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
139
- ],
140
- "text/plain": [
141
- "<IPython.core.display.HTML object>"
142
- ]
143
- },
144
- "metadata": {},
145
- "output_type": "display_data"
146
- },
147
- {
148
- "name": "stdout",
149
- "output_type": "stream",
150
- "text": [
151
- "ERROR: Too many requests on mistral client\n"
152
- ]
153
- },
154
- {
155
- "name": "stderr",
156
- "output_type": "stream",
157
- "text": [
158
- "Traceback (most recent call last):\n",
159
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
160
- " output = await route_utils.call_process_api(\n",
161
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
162
- " output = await app.get_blocks().process_api(\n",
163
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
164
- " result = await self.call_function(\n",
165
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
166
- " prediction = await utils.async_iteration(iterator)\n",
167
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
168
- " return await iterator.__anext__()\n",
169
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
170
- " return await anyio.to_thread.run_sync(\n",
171
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
172
- " return await get_asynclib().run_sync_in_worker_thread(\n",
173
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
174
- " return await future\n",
175
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
176
- " result = context.run(func, *args)\n",
177
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
178
- " return next(iterator)\n",
179
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
180
- " yield from f(*args, **kwargs)\n",
181
- " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
182
- " text_to_generate = history[-1][1]\n",
183
- "TypeError: 'NoneType' object is not subscriptable\n"
184
- ]
185
- },
186
- {
187
- "name": "stdout",
188
- "output_type": "stream",
189
- "text": [
190
- "ERROR: Too many requests on mistral client\n"
191
- ]
192
- },
193
- {
194
- "name": "stderr",
195
- "output_type": "stream",
196
- "text": [
197
- "Traceback (most recent call last):\n",
198
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
199
- " output = await route_utils.call_process_api(\n",
200
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
201
- " output = await app.get_blocks().process_api(\n",
202
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
203
- " result = await self.call_function(\n",
204
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
205
- " prediction = await utils.async_iteration(iterator)\n",
206
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
207
- " return await iterator.__anext__()\n",
208
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
209
- " return await anyio.to_thread.run_sync(\n",
210
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
211
- " return await get_asynclib().run_sync_in_worker_thread(\n",
212
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
213
- " return await future\n",
214
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
215
- " result = context.run(func, *args)\n",
216
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
217
- " return next(iterator)\n",
218
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
219
- " yield from f(*args, **kwargs)\n",
220
- " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
221
- " text_to_generate = history[-1][1]\n",
222
- "TypeError: 'NoneType' object is not subscriptable\n"
223
- ]
224
- },
225
- {
226
- "name": "stdout",
227
- "output_type": "stream",
228
- "text": [
229
- "ERROR: Too many requests on mistral client\n"
230
- ]
231
- },
232
- {
233
- "name": "stderr",
234
- "output_type": "stream",
235
- "text": [
236
- "Traceback (most recent call last):\n",
237
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
238
- " output = await route_utils.call_process_api(\n",
239
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
240
- " output = await app.get_blocks().process_api(\n",
241
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
242
- " result = await self.call_function(\n",
243
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
244
- " prediction = await utils.async_iteration(iterator)\n",
245
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
246
- " return await iterator.__anext__()\n",
247
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
248
- " return await anyio.to_thread.run_sync(\n",
249
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
250
- " return await get_asynclib().run_sync_in_worker_thread(\n",
251
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
252
- " return await future\n",
253
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
254
- " result = context.run(func, *args)\n",
255
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
256
- " return next(iterator)\n",
257
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
258
- " yield from f(*args, **kwargs)\n",
259
- " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
260
- " text_to_generate = history[-1][1]\n",
261
- "TypeError: 'NoneType' object is not subscriptable\n"
262
- ]
263
- },
264
- {
265
- "name": "stdout",
266
- "output_type": "stream",
267
- "text": [
268
- "ERROR: Too many requests on mistral client\n"
269
- ]
270
- },
271
- {
272
- "name": "stderr",
273
- "output_type": "stream",
274
- "text": [
275
- "Traceback (most recent call last):\n",
276
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
277
- " output = await route_utils.call_process_api(\n",
278
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
279
- " output = await app.get_blocks().process_api(\n",
280
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
281
- " result = await self.call_function(\n",
282
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
283
- " prediction = await utils.async_iteration(iterator)\n",
284
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
285
- " return await iterator.__anext__()\n",
286
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
287
- " return await anyio.to_thread.run_sync(\n",
288
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
289
- " return await get_asynclib().run_sync_in_worker_thread(\n",
290
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
291
- " return await future\n",
292
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
293
- " result = context.run(func, *args)\n",
294
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
295
- " return next(iterator)\n",
296
- " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
297
- " yield from f(*args, **kwargs)\n",
298
- " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
299
- " text_to_generate = history[-1][1]\n",
300
- "TypeError: 'NoneType' object is not subscriptable\n"
301
- ]
302
- }
303
- ],
304
- "source": [
305
- "\n",
306
- "###### COQUI TTS FUNCTIONS ######\n",
307
- "def get_latents(speaker_wav):\n",
308
- " # create as function as we can populate here with voice cleanup/filtering\n",
309
- " gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)\n",
310
- " return gpt_cond_latent, diffusion_conditioning, speaker_embedding\n",
311
- "\n",
312
- "\n",
313
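- "# Build a Mistral-7B-Instruct style prompt: wrap each user turn in [INST] ... [/INST] and close each bot reply with </s>\n",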
- "def format_prompt(message, history):\n",
314
- " prompt = \"<s>\"\n",
315
- " for user_prompt, bot_response in history:\n",
316
- " prompt += f\"[INST] {user_prompt} [/INST]\"\n",
317
- " prompt += f\" {bot_response}</s> \"\n",
318
- " prompt += f\"[INST] {message} [/INST]\"\n",
319
- " return prompt\n",
320
- "\n",
321
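- "# Stream a reply from the Mistral-7B-Instruct Inference API, yielding the partial text; fall back to a canned message on errors\n",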
- "def generate(\n",
322
- " prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,\n",
323
- "):\n",
324
- " temperature = float(temperature)\n",
325
- " if temperature < 1e-2:\n",
326
- " temperature = 1e-2\n",
327
- " top_p = float(top_p)\n",
328
- "\n",
329
- " generate_kwargs = dict(\n",
330
- " temperature=temperature,\n",
331
- " max_new_tokens=max_new_tokens,\n",
332
- " top_p=top_p,\n",
333
- " repetition_penalty=repetition_penalty,\n",
334
- " do_sample=True,\n",
335
- " seed=42,\n",
336
- " )\n",
337
- "\n",
338
- " formatted_prompt = format_prompt(prompt, history)\n",
339
- "\n",
340
- " try:\n",
341
- " stream = text_client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)\n",
342
- " output = \"\"\n",
343
- " for response in stream:\n",
344
- " output += response.token.text\n",
345
- " yield output\n",
346
- "\n",
347
- " except Exception as e:\n",
348
- " if \"Too Many Requests\" in str(e):\n",
349
- " print(\"ERROR: Too many requests on mistral client\")\n",
350
- " gr.Warning(\"Unfortunately Mistral is unable to process\")\n",
351
- " output = \"Unfortuanately I am not able to process your request now !\"\n",
352
- " else:\n",
353
- " print(\"Unhandled Exception: \", str(e))\n",
354
- " gr.Warning(\"Unfortunately Mistral is unable to process\")\n",
355
- " output = \"I do not know what happened but I could not understand you .\"\n",
356
- " \n",
357
- " return output\n",
358
- "\n",
359
- "\n",
360
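- "# Send the recorded file to the hosted whisper-jax Space via gradio_client and return the stripped transcription\n",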
- "def transcribe(wav_path):\n",
361
- " \n",
362
- " # get first element from whisper_jax and strip it to delete begin and end space\n",
363
- " return whisper_client.predict(\n",
364
- "\t\t\t\twav_path,\t# str (filepath or URL to file) in 'inputs' Audio component\n",
365
- "\t\t\t\t\"transcribe\",\t# str in 'Task' Radio component\n",
366
- " False, # return_timestamps=False for whisper-jax https://gist.github.com/sanchit-gandhi/781dd7003c5b201bfe16d28634c8d4cf#file-whisper_jax_endpoint-py\n",
367
- "\t\t\t\tapi_name=\"/predict\"\n",
368
- " )[0].strip()\n",
369
- " \n",
370
- "\n",
371
- "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n",
372
- "\n",
373
- "\n",
374
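- "# Append the typed message to the chat history and clear/disable the textbox while the bot answers\n",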
- "def add_text(history, text):\n",
375
- " history = [] if history is None else history\n",
376
- " history = history + [(text, None)]\n",
377
- " return history, gr.update(value=\"\", interactive=False)\n",
378
- "\n",
379
- "\n",
380
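- "# Transcribe the recorded audio and append it to the chat history; on failure, substitute a default prompt\n",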
- "def add_file(history, file):\n",
381
- " history = [] if history is None else history\n",
382
- " \n",
383
- " try:\n",
384
- " text = transcribe(\n",
385
- " file\n",
386
- " )\n",
387
- " print(\"Transcribed text:\",text)\n",
388
- " except Exception as e:\n",
389
- " print(str(e))\n",
390
- " gr.Warning(\"There was an issue with transcription, please try writing for now\")\n",
391
- " # Apply a null text on error\n",
392
- " text = \"Transcription seems failed, please tell me a joke about chickens\"\n",
393
- " \n",
394
- " history = history + [(text, None)]\n",
395
- " return history\n",
396
- "\n",
397
- "\n",
398
- "\n",
399
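- "# Stream the assistant reply into the last history entry so the chatbot updates as tokens arrive\n",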
- "def bot(history, system_prompt=\"\"): \n",
400
- " history = [] if history is None else history\n",
401
- "\n",
402
- " if system_prompt == \"\":\n",
403
- " system_prompt = system_message\n",
404
- " \n",
405
- " history[-1][1] = \"\"\n",
406
- " for character in generate(history[-1][0], history[:-1]):\n",
407
- " history[-1][1] = character\n",
408
- " yield history \n",
409
- "\n",
410
- "\n",
411
- "def get_latents(speaker_wav):\n",
412
- " # Generate speaker embedding and latents for TTS\n",
413
- " gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)\n",
414
- " return gpt_cond_latent, diffusion_conditioning, speaker_embedding\n",
415
- "\n",
416
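- "# Precompute conditioning latents once per reference voice so per-sentence synthesis stays fast\n",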
- "latent_map={}\n",
417
- "latent_map[\"Female_Voice\"] = get_latents(\"examples/female.wav\")\n",
418
- "\n",
419
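- "# Run XTTS inference with the precomputed latents and save the result as a 24 kHz wav file\n",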
- "def get_voice(prompt,language, latent_tuple,suffix=\"0\"):\n",
420
- " gpt_cond_latent,diffusion_conditioning, speaker_embedding = latent_tuple\n",
421
- " # Direct version\n",
422
- " t0 = time.time()\n",
423
- " out = model.inference(\n",
424
- " prompt,\n",
425
- " language,\n",
426
- " gpt_cond_latent,\n",
427
- " speaker_embedding,\n",
428
- " diffusion_conditioning\n",
429
- " )\n",
430
- " inference_time = time.time() - t0\n",
431
- " print(f\"I: Time to generate audio: {round(inference_time*1000)} milliseconds\")\n",
432
- " real_time_factor= (time.time() - t0) / out['wav'].shape[-1] * 24000\n",
433
- " print(f\"Real-time factor (RTF): {real_time_factor}\")\n",
434
- " wav_filename=f\"output_{suffix}.wav\"\n",
435
- " torchaudio.save(wav_filename, torch.tensor(out[\"wav\"]).unsqueeze(0), 24000)\n",
436
- " return wav_filename\n",
437
- "\n",
438
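- "# Split the last bot reply into sentences, synthesize each with XTTS, then concatenate the clips with ffmpeg\n",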
- "def generate_speech(history):\n",
439
- " text_to_generate = history[-1][1]\n",
440
- " text_to_generate = text_to_generate.replace(\"\\n\", \" \").strip()\n",
441
- " text_to_generate = nltk.sent_tokenize(text_to_generate)\n",
442
- "\n",
443
- " language = \"en\"\n",
444
- "\n",
445
- " wav_list = []\n",
446
- " for i,sentence in enumerate(text_to_generate):\n",
447
- " # Sometimes prompt </s> coming on output remove it \n",
448
- " sentence= sentence.replace(\"</s>\",\"\")\n",
449
- " # A fast fix for last chacter, may produce weird sounds if it is with text\n",
450
- " if sentence[-1] in [\"!\",\"?\",\".\",\",\"]:\n",
451
- " #just add a space\n",
452
- " sentence = sentence[:-1] + \" \" + sentence[-1]\n",
453
- " \n",
454
- " print(\"Sentence:\", sentence)\n",
455
- " \n",
456
- " try: \n",
457
- " # generate speech using precomputed latents\n",
458
- " # This is not streaming but it will be fast\n",
459
- " \n",
460
- " # giving sentence suffix so we can merge all to single audio at end\n",
461
- " # On mobile there is no autoplay support due to mobile security!\n",
462
- " wav = get_voice(sentence,language, latent_map[\"Female_Voice\"], suffix=i)\n",
463
- " wav_list.append(wav)\n",
464
- " \n",
465
- " yield wav\n",
466
- " wait_time= librosa.get_duration(path=wav)\n",
467
- " print(\"Sleeping till audio end\")\n",
468
- " time.sleep(wait_time)\n",
469
- "\n",
470
- " except RuntimeError as e :\n",
471
- " if \"device-side assert\" in str(e):\n",
472
- " # cannot do anything on cuda device side error, need tor estart\n",
473
- " print(f\"Exit due to: Unrecoverable exception caused by prompt:{sentence}\", flush=True)\n",
474
- " gr.Warning(\"Unhandled Exception encounter, please retry in a minute\")\n",
475
- " print(\"Cuda device-assert Runtime encountered need restart\")\n",
476
- "\n",
477
- " \n",
478
- " # HF Space specific.. This error is unrecoverable need to restart space \n",
479
- " api.restart_space(repo_id=repo_id)\n",
480
- " else:\n",
481
- " print(\"RuntimeError: non device-side assert error:\", str(e))\n",
482
- " raise e\n",
483
- " #Spoken on autoplay everysencen now produce a concataned one at the one\n",
484
- " #requires pip install ffmpeg-python\n",
485
- " files_to_concat= [ffmpeg.input(w) for w in wav_list]\n",
486
- " combined_file_name=\"combined.wav\"\n",
487
- " ffmpeg.concat(*files_to_concat,v=0, a=1).output(combined_file_name).run(overwrite_output=True)\n",
488
- "\n",
489
- " return gr.Audio.update(value=combined_file_name, autoplay=False)\n",
490
- " \n",
491
- "\n",
492
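- "# Gradio UI: chatbot display, textbox and microphone input; each submission runs add_text/add_file -> bot -> generate_speech\n",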
- "with gr.Blocks(title=title) as demo:\n",
493
- " gr.Markdown(DESCRIPTION)\n",
494
- " \n",
495
- " \n",
496
- " chatbot = gr.Chatbot(\n",
497
- " [],\n",
498
- " elem_id=\"chatbot\",\n",
499
- " avatar_images=('examples/lama.jpeg', 'examples/lama2.jpeg'),\n",
500
- " bubble_full_width=False,\n",
501
- " )\n",
502
- "\n",
503
- " with gr.Row():\n",
504
- " txt = gr.Textbox(\n",
505
- " scale=3,\n",
506
- " show_label=False,\n",
507
- " placeholder=\"Enter text and press enter, or speak to your microphone\",\n",
508
- " container=False,\n",
509
- " )\n",
510
- " txt_btn = gr.Button(value=\"Submit text\",scale=1)\n",
511
- " btn = gr.Audio(source=\"microphone\", type=\"filepath\", scale=4)\n",
512
- " \n",
513
- " with gr.Row():\n",
514
- " audio = gr.Audio(type=\"numpy\", streaming=False, autoplay=True, label=\"Generated audio response\", show_label=True)\n",
515
- "\n",
516
- " clear_btn = gr.ClearButton([chatbot, audio])\n",
517
- " \n",
518
- " txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n",
519
- " bot, chatbot, chatbot\n",
520
- " ).then(generate_speech, chatbot, audio)\n",
521
- "\n",
522
- " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n",
523
- "\n",
524
- " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n",
525
- " bot, chatbot, chatbot\n",
526
- " ).then(generate_speech, chatbot, audio)\n",
527
- " \n",
528
- " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n",
529
- " \n",
530
- " file_msg = btn.stop_recording(add_file, [chatbot, btn], [chatbot], queue=False).then(\n",
531
- " bot, chatbot, chatbot\n",
532
- " ).then(generate_speech, chatbot, audio)\n",
533
- " \n",
534
- "\n",
535
- " gr.Markdown(\"\"\"\n",
536
- "This Space demonstrates how to speak to a chatbot, based solely on open-source models.\n",
537
- "It relies on 3 models:\n",
538
- "1. [Whisper-large-v2](https://huggingface.co/spaces/sanchit-gandhi/whisper-jax) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).\n",
539
- "2. [Mistral-7b-instruct](https://huggingface.co/spaces/osanseviero/mistral-super-fast) as the chat model, the actual chat model. It is called from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference).\n",
540
- "3. [Coqui's XTTS](https://huggingface.co/spaces/coqui/xtts) as a TTS model, to generate the chatbot answers. This time, the model is hosted locally.\n",
541
- "\n",
542
- "Note:\n",
543
- "- By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml\"\"\")\n",
544
- "demo.queue()\n",
545
- "demo.launch(debug=True)"
546
- ]
547
- },
548
- {
549
- "cell_type": "code",
550
- "execution_count": null,
551
- "id": "652d675a-8912-44cb-830d-29fc5d6679d4",
552
- "metadata": {},
553
- "outputs": [],
554
- "source": []
555
- }
556
- ],
557
- "metadata": {
558
- "kernelspec": {
559
- "display_name": "Python 3 (ipykernel)",
560
- "language": "python",
561
- "name": "python3"
562
- },
563
- "language_info": {
564
- "codemirror_mode": {
565
- "name": "ipython",
566
- "version": 3
567
- },
568
- "file_extension": ".py",
569
- "mimetype": "text/x-python",
570
- "name": "python",
571
- "nbconvert_exporter": "python",
572
- "pygments_lexer": "ipython3",
573
- "version": "3.10.12"
574
- }
575
- },
576
- "nbformat": 4,
577
- "nbformat_minor": 5
578
- }