ggoknar committed on
Commit 145f28e
1 parent: 245ae02

xtts and whisper jax

Files changed (3)
  1. app.py +131 -30
  2. mistral.ipynb +578 -0
  3. requirements.txt +5 -2
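
The functional change behind the commit title: the whisper-large-v2 Space client is swapped for the whisper-jax Space, whose /predict endpoint takes an extra return_timestamps argument and returns the transcription as the first element of its result, and XTTS is now loaded through its low-level Xtts API (with DeepSpeed) instead of the TTS.api wrapper. As a minimal sketch, mirroring the transcribe() helper added in app.py below (the Space may be rate limited):

from gradio_client import Client

# whisper-jax Space used by this commit; it may be time limited
whisper_client = Client("https://sanchit-gandhi-whisper-jax.hf.space")

def transcribe(wav_path):
    # the first element of the prediction is the transcription text
    result = whisper_client.predict(
        wav_path,      # filepath or URL of the audio to transcribe
        "transcribe",  # value for the Space's 'Task' radio component
        False,         # return_timestamps
        api_name="/predict",
    )
    return result[0].strip()
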
app.py CHANGED
@@ -11,8 +11,35 @@ import nltk # we'll use this to split into sentences
nltk.download('punkt')
import uuid

+ import librosa
+ import torchaudio
from TTS.api import TTS
- tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1", gpu=True)
+ from TTS.tts.configs.xtts_config import XttsConfig
+ from TTS.tts.models.xtts import Xtts
+ from TTS.utils.generic_utils import get_user_data_dir
+
+ # This will trigger downloading the model
+ print("Downloading if not downloaded Coqui XTTS V1")
+ tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1")
+ del tts
+ print("XTTS downloaded")
+
+ print("Loading XTTS")
+ # Below will use the model directly for inference
+ model_path = os.path.join(get_user_data_dir("tts"), "tts_models--multilingual--multi-dataset--xtts_v1")
+ config = XttsConfig()
+ config.load_json(os.path.join(model_path, "config.json"))
+ model = Xtts.init_from_config(config)
+ model.load_checkpoint(
+     config,
+     checkpoint_path=os.path.join(model_path, "model.pth"),
+     vocab_path=os.path.join(model_path, "vocab.json"),
+     eval=True,
+     use_deepspeed=True
+ )
+ model.cuda()
+ print("Done loading TTS")
+

title = "Voice chat with Mistral 7B Instruct"

@@ -44,11 +71,20 @@ from gradio_client import Client
from huggingface_hub import InferenceClient


- whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
+ # This client is down
+ # whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
+ # Replacement whisper client, it may be time limited
+ whisper_client = Client("https://sanchit-gandhi-whisper-jax.hf.space")
text_client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)

+ ###### COQUI TTS FUNCTIONS ######
+ def get_latents(speaker_wav):
+     # kept as a function so voice cleanup/filtering can be added here later
+     gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)
+     return gpt_cond_latent, diffusion_conditioning, speaker_embedding
+

def format_prompt(message, history):
    prompt = "<s>"
@@ -77,22 +113,35 @@ def generate(

    formatted_prompt = format_prompt(prompt, history)

-     stream = text_client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
+     try:
+         stream = text_client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+         output = ""
+         for response in stream:
+             output += response.token.text
+             yield output
+
+     except Exception as e:
+         if "Too Many Requests" in str(e):
+             print("ERROR: Too many requests on mistral client")
+             gr.Warning("Unfortunately Mistral is unable to process")
+             output = "Unfortunately I am not able to process your request now!"
+         else:
+             print("Unhandled Exception: ", str(e))
+             gr.Warning("Unfortunately Mistral is unable to process")
+             output = "I do not know what happened but I could not understand you."
+
    return output


def transcribe(wav_path):

+     # take the first element returned by whisper-jax and strip leading/trailing spaces
    return whisper_client.predict(
        wav_path,  # str (filepath or URL to file) in 'inputs' Audio component
        "transcribe",  # str in 'Task' Radio component
+         False,  # return_timestamps=False for whisper-jax, see https://gist.github.com/sanchit-gandhi/781dd7003c5b201bfe16d28634c8d4cf#file-whisper_jax_endpoint-py
        api_name="/predict"
-     )
+     )[0].strip()


# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
@@ -106,9 +155,17 @@ def add_text(history, text):

def add_file(history, file):
    history = [] if history is None else history
-     text = transcribe(
-         file
-     )
+
+     try:
+         text = transcribe(
+             file
+         )
+         print("Transcribed text:", text)
+     except Exception as e:
+         print(str(e))
+         gr.Warning("There was an issue with transcription, please try writing for now")
+         # Apply a fallback text on error
+         text = "Transcription seems to have failed, please tell me a joke about chickens"

    history = history + [(text, None)]
    return history
@@ -126,29 +183,65 @@ def bot(history, system_prompt=""):
    history[-1][1] = character
    yield history

-
+
+ def get_latents(speaker_wav):
+     # Generate speaker embedding and latents for TTS
+     gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)
+     return gpt_cond_latent, diffusion_conditioning, speaker_embedding
+
+ latent_map = {}
+ latent_map["Female_Voice"] = get_latents("examples/female.wav")
+
+ def get_voice(prompt, language, latent_tuple, suffix="0"):
+     gpt_cond_latent, diffusion_conditioning, speaker_embedding = latent_tuple
+     # Direct version
+     t0 = time.time()
+     out = model.inference(
+         prompt,
+         language,
+         gpt_cond_latent,
+         speaker_embedding,
+         diffusion_conditioning
+     )
+     inference_time = time.time() - t0
+     print(f"I: Time to generate audio: {round(inference_time*1000)} milliseconds")
+     real_time_factor = (time.time() - t0) / out['wav'].shape[-1] * 24000
+     print(f"Real-time factor (RTF): {real_time_factor}")
+     wav_filename = f"output_{suffix}.wav"
+     torchaudio.save(wav_filename, torch.tensor(out["wav"]).unsqueeze(0), 24000)
+     return wav_filename
+
def generate_speech(history):
    text_to_generate = history[-1][1]
    text_to_generate = text_to_generate.replace("\n", " ").strip()
    text_to_generate = nltk.sent_tokenize(text_to_generate)
-
-     filename = f"{uuid.uuid4()}.wav"
-     sampling_rate = tts.synthesizer.tts_config.audio["sample_rate"]
-     silence = [0] * int(0.25 * sampling_rate)

-
-     for sentence in text_to_generate:
-         try:
+     language = "en"

-             # generate speech by cloning a voice using default settings
-             wav = tts.tts(text=sentence,
-                 speaker_wav="examples/female.wav",
-                 decoder_iterations=25,
-                 decoder_sampler="dpm++2m",
-                 speed=1.2,
-                 language="en")
+     wav_list = []
+     for i, sentence in enumerate(text_to_generate):
+         # Sometimes the prompt's </s> token leaks into the output; remove it
+         sentence = sentence.replace("</s>", "")
+         # A quick fix for the last character; may produce odd sounds if it is attached to the text
+         if sentence[-1] in ["!", "?", ".", ","]:
+             # just add a space
+             sentence = sentence[:-1] + " " + sentence[-1]
+
+         print("Sentence:", sentence)
+
+         try:
+             # generate speech using precomputed latents
+             # This is not streaming but it will be fast

-             yield (sampling_rate, np.array(wav)) #np.array(wav + silence))
+             # give each sentence a suffix so all of them can be merged into a single audio file at the end
+             # On mobile there is no autoplay support due to mobile security!
+             wav = get_voice(sentence, language, latent_map["Female_Voice"], suffix=i)
+             wav_list.append(wav)
+
+             yield wav
+             wait_time = librosa.get_duration(path=wav)
+             print("Sleeping until the audio ends")
+             time.sleep(wait_time)

        except RuntimeError as e:
            if "device-side assert" in str(e):
@@ -163,6 +256,14 @@ def generate_speech(history):
        else:
            print("RuntimeError: non device-side assert error:", str(e))
            raise e
+     # Each sentence has already been played on autoplay; now produce a concatenated file at the end
+     # requires: pip install ffmpeg-python
+     files_to_concat = [ffmpeg.input(w) for w in wav_list]
+     combined_file_name = "combined.wav"
+     ffmpeg.concat(*files_to_concat, v=0, a=1).output(combined_file_name).run(overwrite_output=True)
+
+     return gr.Audio.update(value=combined_file_name, autoplay=False)
+

with gr.Blocks(title=title) as demo:
    gr.Markdown(DESCRIPTION)
@@ -186,7 +287,7 @@ with gr.Blocks(title=title) as demo:
    btn = gr.Audio(source="microphone", type="filepath", scale=4)

    with gr.Row():
-         audio = gr.Audio(type="numpy", streaming=True, autoplay=True, label="Generated audio response", show_label=True)
+         audio = gr.Audio(type="numpy", streaming=False, autoplay=True, label="Generated audio response", show_label=True)

    clear_btn = gr.ClearButton([chatbot, audio])

@@ -210,7 +311,7 @@ with gr.Blocks(title=title) as demo:
    gr.Markdown("""
This Space demonstrates how to speak to a chatbot, based solely on open-source models.
It relies on 3 models:
- 1. [Whisper-large-v2](https://huggingface.co/spaces/sanchit-gandhi/whisper-large-v2) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).
+ 1. [Whisper-large-v2](https://huggingface.co/spaces/sanchit-gandhi/whisper-jax) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).
2. [Mistral-7b-instruct](https://huggingface.co/spaces/osanseviero/mistral-super-fast) as the chat model, the actual chat model. It is called from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference).
3. [Coqui's XTTS](https://huggingface.co/spaces/coqui/xtts) as a TTS model, to generate the chatbot answers. This time, the model is hosted locally.

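The Markdown block at the end of app.py describes the three-stage pipeline: a remote Whisper Space for ASR (via gradio_client), Mistral-7B-Instruct via huggingface_hub's InferenceClient, and local XTTS for synthesis. Below is a condensed, non-streaming sketch of that flow using only calls that appear in this diff; it assumes the model, whisper_client, text_client, transcribe, format_prompt and get_latents objects defined above, plus examples/female.wav as the reference voice, and is an illustration rather than the Space's actual handler chain.

import torch
import torchaudio

def reply_with_voice(wav_path, history):
    # 1. ASR: transcribe the recording through the whisper-jax Space
    user_text = transcribe(wav_path)

    # 2. Chat: query Mistral-7B-Instruct through the HF Inference API (non-streaming here)
    answer = text_client.text_generation(format_prompt(user_text, history), max_new_tokens=256)

    # 3. TTS: synthesize the answer locally with XTTS, reusing latents computed once from the reference clip
    gpt_cond_latent, diffusion_conditioning, speaker_embedding = get_latents("examples/female.wav")
    out = model.inference(answer, "en", gpt_cond_latent, speaker_embedding, diffusion_conditioning)
    torchaudio.save("reply.wav", torch.tensor(out["wav"]).unsqueeze(0), 24000)
    return user_text, answer, "reply.wav"
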
mistral.ipynb ADDED
@@ -0,0 +1,578 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "f8bdd950-1b95-4088-890a-94417292f6e1",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stderr",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "[nltk_data] Downloading package punkt to /home/gorkem/nltk_data...\n",
14
+ "[nltk_data] Package punkt is already up-to-date!\n",
15
+ "2023-10-13 00:33:39.399490: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
16
+ ]
17
+ },
18
+ {
19
+ "name": "stdout",
20
+ "output_type": "stream",
21
+ "text": [
22
+ "Downloading if not downloaded Coqui XTTS V1\n",
23
+ " > tts_models/multilingual/multi-dataset/xtts_v1 is already downloaded.\n",
24
+ " > Using model: xtts\n",
25
+ "XTTS downloaded\n",
26
+ "Loading XTTS\n",
27
+ "[2023-10-13 00:34:12,573] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed info: version=0.8.3+f1e4fb0b, git-hash=f1e4fb0b, git-branch=HEAD\n",
28
+ "[2023-10-13 00:34:12,587] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter replace_method is deprecated. This parameter is no longer needed, please remove from your call to DeepSpeed-inference\n",
29
+ "[2023-10-13 00:34:12,589] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter mp_size is deprecated use tensor_parallel.tp_size instead\n",
30
+ "[2023-10-13 00:34:12,590] [INFO] [logging.py:93:log_dist] [Rank -1] quantize_bits = 8 mlp_extra_grouping = False, quantize_groups = 1\n",
31
+ "[2023-10-13 00:34:12,854] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed-Inference config: {'layer_id': 0, 'hidden_size': 1024, 'intermediate_size': 4096, 'heads': 16, 'num_hidden_layers': -1, 'fp16': False, 'pre_layer_norm': True, 'local_rank': -1, 'stochastic_mode': False, 'epsilon': 1e-05, 'mp_size': 1, 'q_int8': False, 'scale_attention': True, 'triangular_masking': True, 'local_attention': False, 'window_size': 1, 'rotary_dim': -1, 'rotate_half': False, 'rotate_every_two': True, 'return_tuple': True, 'mlp_after_attn': True, 'mlp_act_func_type': <ActivationFuncType.GELU: 1>, 'specialized_mode': False, 'training_mp_size': 1, 'bigscience_bloom': False, 'max_out_tokens': 1024, 'scale_attn_by_inverse_layer_idx': False, 'enable_qkv_quantization': False, 'use_mup': False, 'return_single_tuple': False}\n",
32
+ "Done loading TTS\n",
33
+ "Loaded as API: https://sanchit-gandhi-whisper-jax.hf.space/ ✔\n"
34
+ ]
35
+ }
36
+ ],
37
+ "source": [
38
+ "from __future__ import annotations\n",
39
+ "\n",
40
+ "import os\n",
41
+ "# By using XTTS you agree to CPML license https://coqui.ai/cpml\n",
42
+ "os.environ[\"COQUI_TOS_AGREED\"] = \"1\"\n",
43
+ "\n",
44
+ "import gradio as gr\n",
45
+ "import numpy as np\n",
46
+ "import torch\n",
47
+ "import nltk # we'll use this to split into sentences\n",
48
+ "nltk.download('punkt')\n",
49
+ "import uuid\n",
50
+ "\n",
51
+ "import librosa\n",
52
+ "import torchaudio\n",
53
+ "from TTS.api import TTS\n",
54
+ "from TTS.tts.configs.xtts_config import XttsConfig\n",
55
+ "from TTS.tts.models.xtts import Xtts\n",
56
+ "from TTS.utils.generic_utils import get_user_data_dir\n",
57
+ "\n",
58
+ "# This will trigger downloading model\n",
59
+ "print(\"Downloading if not downloaded Coqui XTTS V1\")\n",
60
+ "tts = TTS(\"tts_models/multilingual/multi-dataset/xtts_v1\")\n",
61
+ "del tts\n",
62
+ "print(\"XTTS downloaded\")\n",
63
+ "\n",
64
+ "print(\"Loading XTTS\")\n",
65
+ "#Below will use model directly for inference\n",
66
+ "model_path = os.path.join(get_user_data_dir(\"tts\"), \"tts_models--multilingual--multi-dataset--xtts_v1\")\n",
67
+ "config = XttsConfig()\n",
68
+ "config.load_json(os.path.join(model_path, \"config.json\"))\n",
69
+ "model = Xtts.init_from_config(config)\n",
70
+ "model.load_checkpoint(\n",
71
+ " config,\n",
72
+ " checkpoint_path=os.path.join(model_path, \"model.pth\"),\n",
73
+ " vocab_path=os.path.join(model_path, \"vocab.json\"),\n",
74
+ " eval=True,\n",
75
+ " use_deepspeed=True\n",
76
+ ")\n",
77
+ "model.cuda()\n",
78
+ "print(\"Done loading TTS\")\n",
79
+ "\n",
80
+ "\n",
81
+ "title = \"Voice chat with Mistral 7B Instruct\"\n",
82
+ "\n",
83
+ "DESCRIPTION = \"\"\"# Voice chat with Mistral 7B Instruct\"\"\"\n",
84
+ "css = \"\"\".toast-wrap { display: none !important } \"\"\"\n",
85
+ "\n",
86
+ "from huggingface_hub import HfApi\n",
87
+ "HF_TOKEN = os.environ.get(\"HF_TOKEN\")\n",
88
+ "# will use api to restart space on a unrecoverable error\n",
89
+ "api = HfApi(token=HF_TOKEN)\n",
90
+ "\n",
91
+ "repo_id = \"ylacombe/voice-chat-with-lama\"\n",
92
+ "\n",
93
+ "system_message = \"\\nYou are a helpful, respectful and honest assistant. Your answers are short, ideally a few words long, if it is possible. Always answer as helpfully as possible, while being safe.\\n\\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\"\n",
94
+ "temperature = 0.9\n",
95
+ "top_p = 0.6\n",
96
+ "repetition_penalty = 1.2\n",
97
+ "\n",
98
+ "\n",
99
+ "import gradio as gr\n",
100
+ "import os\n",
101
+ "import time\n",
102
+ "\n",
103
+ "import gradio as gr\n",
104
+ "from transformers import pipeline\n",
105
+ "import numpy as np\n",
106
+ "\n",
107
+ "from gradio_client import Client\n",
108
+ "from huggingface_hub import InferenceClient\n",
109
+ "\n",
110
+ "\n",
111
+ "# This client is down\n",
112
+ "#whisper_client = Client(\"https://sanchit-gandhi-whisper-large-v2.hf.space/\")\n",
113
+ "# Replacement whisper client, it may be time limited\n",
114
+ "whisper_client = Client(\"https://sanchit-gandhi-whisper-jax.hf.space\")\n",
115
+ "text_client = InferenceClient(\n",
116
+ " \"mistralai/Mistral-7B-Instruct-v0.1\"\n",
117
+ ")\n"
118
+ ]
119
+ },
120
+ {
121
+ "cell_type": "code",
122
+ "execution_count": null,
123
+ "id": "d8687cd2-e989-4db9-b16a-04ad9460e6f1",
124
+ "metadata": {},
125
+ "outputs": [
126
+ {
127
+ "name": "stdout",
128
+ "output_type": "stream",
129
+ "text": [
130
+ "Running on local URL: http://127.0.0.1:7861\n",
131
+ "\n",
132
+ "To create a public link, set `share=True` in `launch()`.\n"
133
+ ]
134
+ },
135
+ {
136
+ "data": {
137
+ "text/html": [
138
+ "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
139
+ ],
140
+ "text/plain": [
141
+ "<IPython.core.display.HTML object>"
142
+ ]
143
+ },
144
+ "metadata": {},
145
+ "output_type": "display_data"
146
+ },
147
+ {
148
+ "name": "stdout",
149
+ "output_type": "stream",
150
+ "text": [
151
+ "ERROR: Too many requests on mistral client\n"
152
+ ]
153
+ },
154
+ {
155
+ "name": "stderr",
156
+ "output_type": "stream",
157
+ "text": [
158
+ "Traceback (most recent call last):\n",
159
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
160
+ " output = await route_utils.call_process_api(\n",
161
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
162
+ " output = await app.get_blocks().process_api(\n",
163
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
164
+ " result = await self.call_function(\n",
165
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
166
+ " prediction = await utils.async_iteration(iterator)\n",
167
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
168
+ " return await iterator.__anext__()\n",
169
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
170
+ " return await anyio.to_thread.run_sync(\n",
171
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
172
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
173
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
174
+ " return await future\n",
175
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
176
+ " result = context.run(func, *args)\n",
177
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
178
+ " return next(iterator)\n",
179
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
180
+ " yield from f(*args, **kwargs)\n",
181
+ " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
182
+ " text_to_generate = history[-1][1]\n",
183
+ "TypeError: 'NoneType' object is not subscriptable\n"
184
+ ]
185
+ },
186
+ {
187
+ "name": "stdout",
188
+ "output_type": "stream",
189
+ "text": [
190
+ "ERROR: Too many requests on mistral client\n"
191
+ ]
192
+ },
193
+ {
194
+ "name": "stderr",
195
+ "output_type": "stream",
196
+ "text": [
197
+ "Traceback (most recent call last):\n",
198
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
199
+ " output = await route_utils.call_process_api(\n",
200
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
201
+ " output = await app.get_blocks().process_api(\n",
202
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
203
+ " result = await self.call_function(\n",
204
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
205
+ " prediction = await utils.async_iteration(iterator)\n",
206
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
207
+ " return await iterator.__anext__()\n",
208
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
209
+ " return await anyio.to_thread.run_sync(\n",
210
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
211
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
212
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
213
+ " return await future\n",
214
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
215
+ " result = context.run(func, *args)\n",
216
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
217
+ " return next(iterator)\n",
218
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
219
+ " yield from f(*args, **kwargs)\n",
220
+ " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
221
+ " text_to_generate = history[-1][1]\n",
222
+ "TypeError: 'NoneType' object is not subscriptable\n"
223
+ ]
224
+ },
225
+ {
226
+ "name": "stdout",
227
+ "output_type": "stream",
228
+ "text": [
229
+ "ERROR: Too many requests on mistral client\n"
230
+ ]
231
+ },
232
+ {
233
+ "name": "stderr",
234
+ "output_type": "stream",
235
+ "text": [
236
+ "Traceback (most recent call last):\n",
237
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
238
+ " output = await route_utils.call_process_api(\n",
239
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
240
+ " output = await app.get_blocks().process_api(\n",
241
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
242
+ " result = await self.call_function(\n",
243
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
244
+ " prediction = await utils.async_iteration(iterator)\n",
245
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
246
+ " return await iterator.__anext__()\n",
247
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
248
+ " return await anyio.to_thread.run_sync(\n",
249
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
250
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
251
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
252
+ " return await future\n",
253
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
254
+ " result = context.run(func, *args)\n",
255
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
256
+ " return next(iterator)\n",
257
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
258
+ " yield from f(*args, **kwargs)\n",
259
+ " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
260
+ " text_to_generate = history[-1][1]\n",
261
+ "TypeError: 'NoneType' object is not subscriptable\n"
262
+ ]
263
+ },
264
+ {
265
+ "name": "stdout",
266
+ "output_type": "stream",
267
+ "text": [
268
+ "ERROR: Too many requests on mistral client\n"
269
+ ]
270
+ },
271
+ {
272
+ "name": "stderr",
273
+ "output_type": "stream",
274
+ "text": [
275
+ "Traceback (most recent call last):\n",
276
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/queueing.py\", line 388, in call_prediction\n",
277
+ " output = await route_utils.call_process_api(\n",
278
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/route_utils.py\", line 219, in call_process_api\n",
279
+ " output = await app.get_blocks().process_api(\n",
280
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1437, in process_api\n",
281
+ " result = await self.call_function(\n",
282
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/blocks.py\", line 1123, in call_function\n",
283
+ " prediction = await utils.async_iteration(iterator)\n",
284
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 503, in async_iteration\n",
285
+ " return await iterator.__anext__()\n",
286
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 496, in __anext__\n",
287
+ " return await anyio.to_thread.run_sync(\n",
288
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/to_thread.py\", line 31, in run_sync\n",
289
+ " return await get_asynclib().run_sync_in_worker_thread(\n",
290
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 937, in run_sync_in_worker_thread\n",
291
+ " return await future\n",
292
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/anyio/_backends/_asyncio.py\", line 867, in run\n",
293
+ " result = context.run(func, *args)\n",
294
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 479, in run_sync_iterator_async\n",
295
+ " return next(iterator)\n",
296
+ " File \"/home/gorkem/.local/lib/python3.10/site-packages/gradio/utils.py\", line 629, in gen_wrapper\n",
297
+ " yield from f(*args, **kwargs)\n",
298
+ " File \"/tmp/ipykernel_8679/550220560.py\", line 134, in generate_speech\n",
299
+ " text_to_generate = history[-1][1]\n",
300
+ "TypeError: 'NoneType' object is not subscriptable\n"
301
+ ]
302
+ }
303
+ ],
304
+ "source": [
305
+ "\n",
306
+ "###### COQUI TTS FUNCTIONS ######\n",
307
+ "def get_latents(speaker_wav):\n",
308
+ " # create as function as we can populate here with voice cleanup/filtering\n",
309
+ " gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)\n",
310
+ " return gpt_cond_latent, diffusion_conditioning, speaker_embedding\n",
311
+ "\n",
312
+ "\n",
313
+ "def format_prompt(message, history):\n",
314
+ " prompt = \"<s>\"\n",
315
+ " for user_prompt, bot_response in history:\n",
316
+ " prompt += f\"[INST] {user_prompt} [/INST]\"\n",
317
+ " prompt += f\" {bot_response}</s> \"\n",
318
+ " prompt += f\"[INST] {message} [/INST]\"\n",
319
+ " return prompt\n",
320
+ "\n",
321
+ "def generate(\n",
322
+ " prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,\n",
323
+ "):\n",
324
+ " temperature = float(temperature)\n",
325
+ " if temperature < 1e-2:\n",
326
+ " temperature = 1e-2\n",
327
+ " top_p = float(top_p)\n",
328
+ "\n",
329
+ " generate_kwargs = dict(\n",
330
+ " temperature=temperature,\n",
331
+ " max_new_tokens=max_new_tokens,\n",
332
+ " top_p=top_p,\n",
333
+ " repetition_penalty=repetition_penalty,\n",
334
+ " do_sample=True,\n",
335
+ " seed=42,\n",
336
+ " )\n",
337
+ "\n",
338
+ " formatted_prompt = format_prompt(prompt, history)\n",
339
+ "\n",
340
+ " try:\n",
341
+ " stream = text_client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)\n",
342
+ " output = \"\"\n",
343
+ " for response in stream:\n",
344
+ " output += response.token.text\n",
345
+ " yield output\n",
346
+ "\n",
347
+ " except Exception as e:\n",
348
+ " if \"Too Many Requests\" in str(e):\n",
349
+ " print(\"ERROR: Too many requests on mistral client\")\n",
350
+ " gr.Warning(\"Unfortunately Mistral is unable to process\")\n",
351
+ " output = \"Unfortuanately I am not able to process your request now !\"\n",
352
+ " else:\n",
353
+ " print(\"Unhandled Exception: \", str(e))\n",
354
+ " gr.Warning(\"Unfortunately Mistral is unable to process\")\n",
355
+ " output = \"I do not know what happened but I could not understand you .\"\n",
356
+ " \n",
357
+ " return output\n",
358
+ "\n",
359
+ "\n",
360
+ "def transcribe(wav_path):\n",
361
+ " \n",
362
+ " # get first element from whisper_jax and strip it to delete begin and end space\n",
363
+ " return whisper_client.predict(\n",
364
+ "\t\t\t\twav_path,\t# str (filepath or URL to file) in 'inputs' Audio component\n",
365
+ "\t\t\t\t\"transcribe\",\t# str in 'Task' Radio component\n",
366
+ " False, # return_timestamps=False for whisper-jax https://gist.github.com/sanchit-gandhi/781dd7003c5b201bfe16d28634c8d4cf#file-whisper_jax_endpoint-py\n",
367
+ "\t\t\t\tapi_name=\"/predict\"\n",
368
+ " )[0].strip()\n",
369
+ " \n",
370
+ "\n",
371
+ "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n",
372
+ "\n",
373
+ "\n",
374
+ "def add_text(history, text):\n",
375
+ " history = [] if history is None else history\n",
376
+ " history = history + [(text, None)]\n",
377
+ " return history, gr.update(value=\"\", interactive=False)\n",
378
+ "\n",
379
+ "\n",
380
+ "def add_file(history, file):\n",
381
+ " history = [] if history is None else history\n",
382
+ " \n",
383
+ " try:\n",
384
+ " text = transcribe(\n",
385
+ " file\n",
386
+ " )\n",
387
+ " print(\"Transcribed text:\",text)\n",
388
+ " except Exception as e:\n",
389
+ " print(str(e))\n",
390
+ " gr.Warning(\"There was an issue with transcription, please try writing for now\")\n",
391
+ " # Apply a null text on error\n",
392
+ " text = \"Transcription seems failed, please tell me a joke about chickens\"\n",
393
+ " \n",
394
+ " history = history + [(text, None)]\n",
395
+ " return history\n",
396
+ "\n",
397
+ "\n",
398
+ "\n",
399
+ "def bot(history, system_prompt=\"\"): \n",
400
+ " history = [] if history is None else history\n",
401
+ "\n",
402
+ " if system_prompt == \"\":\n",
403
+ " system_prompt = system_message\n",
404
+ " \n",
405
+ " history[-1][1] = \"\"\n",
406
+ " for character in generate(history[-1][0], history[:-1]):\n",
407
+ " history[-1][1] = character\n",
408
+ " yield history \n",
409
+ "\n",
410
+ "\n",
411
+ "def get_latents(speaker_wav):\n",
412
+ " # Generate speaker embedding and latents for TTS\n",
413
+ " gpt_cond_latent, diffusion_conditioning, speaker_embedding = model.get_conditioning_latents(audio_path=speaker_wav)\n",
414
+ " return gpt_cond_latent, diffusion_conditioning, speaker_embedding\n",
415
+ "\n",
416
+ "latent_map={}\n",
417
+ "latent_map[\"Female_Voice\"] = get_latents(\"examples/female.wav\")\n",
418
+ "\n",
419
+ "def get_voice(prompt,language, latent_tuple,suffix=\"0\"):\n",
420
+ " gpt_cond_latent,diffusion_conditioning, speaker_embedding = latent_tuple\n",
421
+ " # Direct version\n",
422
+ " t0 = time.time()\n",
423
+ " out = model.inference(\n",
424
+ " prompt,\n",
425
+ " language,\n",
426
+ " gpt_cond_latent,\n",
427
+ " speaker_embedding,\n",
428
+ " diffusion_conditioning\n",
429
+ " )\n",
430
+ " inference_time = time.time() - t0\n",
431
+ " print(f\"I: Time to generate audio: {round(inference_time*1000)} milliseconds\")\n",
432
+ " real_time_factor= (time.time() - t0) / out['wav'].shape[-1] * 24000\n",
433
+ " print(f\"Real-time factor (RTF): {real_time_factor}\")\n",
434
+ " wav_filename=f\"output_{suffix}.wav\"\n",
435
+ " torchaudio.save(wav_filename, torch.tensor(out[\"wav\"]).unsqueeze(0), 24000)\n",
436
+ " return wav_filename\n",
437
+ "\n",
438
+ "def generate_speech(history):\n",
439
+ " text_to_generate = history[-1][1]\n",
440
+ " text_to_generate = text_to_generate.replace(\"\\n\", \" \").strip()\n",
441
+ " text_to_generate = nltk.sent_tokenize(text_to_generate)\n",
442
+ "\n",
443
+ " language = \"en\"\n",
444
+ "\n",
445
+ " wav_list = []\n",
446
+ " for i,sentence in enumerate(text_to_generate):\n",
447
+ " # Sometimes prompt </s> coming on output remove it \n",
448
+ " sentence= sentence.replace(\"</s>\",\"\")\n",
449
+ " # A fast fix for last chacter, may produce weird sounds if it is with text\n",
450
+ " if sentence[-1] in [\"!\",\"?\",\".\",\",\"]:\n",
451
+ " #just add a space\n",
452
+ " sentence = sentence[:-1] + \" \" + sentence[-1]\n",
453
+ " \n",
454
+ " print(\"Sentence:\", sentence)\n",
455
+ " \n",
456
+ " try: \n",
457
+ " # generate speech using precomputed latents\n",
458
+ " # This is not streaming but it will be fast\n",
459
+ " \n",
460
+ " # giving sentence suffix so we can merge all to single audio at end\n",
461
+ " # On mobile there is no autoplay support due to mobile security!\n",
462
+ " wav = get_voice(sentence,language, latent_map[\"Female_Voice\"], suffix=i)\n",
463
+ " wav_list.append(wav)\n",
464
+ " \n",
465
+ " yield wav\n",
466
+ " wait_time= librosa.get_duration(path=wav)\n",
467
+ " print(\"Sleeping till audio end\")\n",
468
+ " time.sleep(wait_time)\n",
469
+ "\n",
470
+ " except RuntimeError as e :\n",
471
+ " if \"device-side assert\" in str(e):\n",
472
+ " # cannot do anything on cuda device side error, need tor estart\n",
473
+ " print(f\"Exit due to: Unrecoverable exception caused by prompt:{sentence}\", flush=True)\n",
474
+ " gr.Warning(\"Unhandled Exception encounter, please retry in a minute\")\n",
475
+ " print(\"Cuda device-assert Runtime encountered need restart\")\n",
476
+ "\n",
477
+ " \n",
478
+ " # HF Space specific.. This error is unrecoverable need to restart space \n",
479
+ " api.restart_space(repo_id=repo_id)\n",
480
+ " else:\n",
481
+ " print(\"RuntimeError: non device-side assert error:\", str(e))\n",
482
+ " raise e\n",
483
+ " #Spoken on autoplay everysencen now produce a concataned one at the one\n",
484
+ " #requires pip install ffmpeg-python\n",
485
+ " files_to_concat= [ffmpeg.input(w) for w in wav_list]\n",
486
+ " combined_file_name=\"combined.wav\"\n",
487
+ " ffmpeg.concat(*files_to_concat,v=0, a=1).output(combined_file_name).run(overwrite_output=True)\n",
488
+ "\n",
489
+ " return gr.Audio.update(value=combined_file_name, autoplay=False)\n",
490
+ " \n",
491
+ "\n",
492
+ "with gr.Blocks(title=title) as demo:\n",
493
+ " gr.Markdown(DESCRIPTION)\n",
494
+ " \n",
495
+ " \n",
496
+ " chatbot = gr.Chatbot(\n",
497
+ " [],\n",
498
+ " elem_id=\"chatbot\",\n",
499
+ " avatar_images=('examples/lama.jpeg', 'examples/lama2.jpeg'),\n",
500
+ " bubble_full_width=False,\n",
501
+ " )\n",
502
+ "\n",
503
+ " with gr.Row():\n",
504
+ " txt = gr.Textbox(\n",
505
+ " scale=3,\n",
506
+ " show_label=False,\n",
507
+ " placeholder=\"Enter text and press enter, or speak to your microphone\",\n",
508
+ " container=False,\n",
509
+ " )\n",
510
+ " txt_btn = gr.Button(value=\"Submit text\",scale=1)\n",
511
+ " btn = gr.Audio(source=\"microphone\", type=\"filepath\", scale=4)\n",
512
+ " \n",
513
+ " with gr.Row():\n",
514
+ " audio = gr.Audio(type=\"numpy\", streaming=False, autoplay=True, label=\"Generated audio response\", show_label=True)\n",
515
+ "\n",
516
+ " clear_btn = gr.ClearButton([chatbot, audio])\n",
517
+ " \n",
518
+ " txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n",
519
+ " bot, chatbot, chatbot\n",
520
+ " ).then(generate_speech, chatbot, audio)\n",
521
+ "\n",
522
+ " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n",
523
+ "\n",
524
+ " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n",
525
+ " bot, chatbot, chatbot\n",
526
+ " ).then(generate_speech, chatbot, audio)\n",
527
+ " \n",
528
+ " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n",
529
+ " \n",
530
+ " file_msg = btn.stop_recording(add_file, [chatbot, btn], [chatbot], queue=False).then(\n",
531
+ " bot, chatbot, chatbot\n",
532
+ " ).then(generate_speech, chatbot, audio)\n",
533
+ " \n",
534
+ "\n",
535
+ " gr.Markdown(\"\"\"\n",
536
+ "This Space demonstrates how to speak to a chatbot, based solely on open-source models.\n",
537
+ "It relies on 3 models:\n",
538
+ "1. [Whisper-large-v2](https://huggingface.co/spaces/sanchit-gandhi/whisper-jax) as an ASR model, to transcribe recorded audio to text. It is called through a [gradio client](https://www.gradio.app/docs/client).\n",
539
+ "2. [Mistral-7b-instruct](https://huggingface.co/spaces/osanseviero/mistral-super-fast) as the chat model, the actual chat model. It is called from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference).\n",
540
+ "3. [Coqui's XTTS](https://huggingface.co/spaces/coqui/xtts) as a TTS model, to generate the chatbot answers. This time, the model is hosted locally.\n",
541
+ "\n",
542
+ "Note:\n",
543
+ "- By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml\"\"\")\n",
544
+ "demo.queue()\n",
545
+ "demo.launch(debug=True)"
546
+ ]
547
+ },
548
+ {
549
+ "cell_type": "code",
550
+ "execution_count": null,
551
+ "id": "652d675a-8912-44cb-830d-29fc5d6679d4",
552
+ "metadata": {},
553
+ "outputs": [],
554
+ "source": []
555
+ }
556
+ ],
557
+ "metadata": {
558
+ "kernelspec": {
559
+ "display_name": "Python 3 (ipykernel)",
560
+ "language": "python",
561
+ "name": "python3"
562
+ },
563
+ "language_info": {
564
+ "codemirror_mode": {
565
+ "name": "ipython",
566
+ "version": 3
567
+ },
568
+ "file_extension": ".py",
569
+ "mimetype": "text/x-python",
570
+ "name": "python",
571
+ "nbconvert_exporter": "python",
572
+ "pygments_lexer": "ipython3",
573
+ "version": "3.10.12"
574
+ }
575
+ },
576
+ "nbformat": 4,
577
+ "nbformat_minor": 5
578
+ }
requirements.txt CHANGED
@@ -53,8 +53,11 @@ encodec==0.1.*
# deps for XTTS
unidecode==1.3.*
langid
- # Install tts
- git+https://github.com/coqui-ai/tts.git@43a7ca800b6508d95e084728a948846556f71a40
+ # Install Coqui TTS
+ TTS==0.17.8
+ # Deepspeed for fast inference
deepspeed==0.8.3
pydub
+ librosa
+ ffmpeg-python
gradio_client
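
librosa and ffmpeg-python are new runtime dependencies: librosa.get_duration() paces the per-sentence playback, and ffmpeg-python concatenates the per-sentence wavs into a single file at the end of generate_speech(). A standalone sketch of that concatenation step, lifted from the code above (file names are illustrative, and the system ffmpeg binary must be installed):

import ffmpeg  # pip install ffmpeg-python

wav_list = ["output_0.wav", "output_1.wav", "output_2.wav"]  # per-sentence clips
files_to_concat = [ffmpeg.input(w) for w in wav_list]
# v=0, a=1 -> audio-only concatenation
ffmpeg.concat(*files_to_concat, v=0, a=1).output("combined.wav").run(overwrite_output=True)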