umair894 committed on
Commit
8ef1aaa
•
1 Parent(s): 24ed103

Create app.py

Browse files
Files changed (1)
  1. app.py +835 -0
app.py ADDED
@@ -0,0 +1,835 @@
+ from __future__ import annotations
+ import os
+ # download the unidic dictionary data needed by MeCab
+ os.system('python -m unidic download')
+
+ # we need to compile a cuBLAS version of llama-cpp-python
+ # or get a prebuilt wheel from https://jllllll.github.io/llama-cpp-python-cuBLAS-wheels/
+ os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python==0.2.11')
+
+ # By using XTTS you agree to the CPML license https://coqui.ai/cpml
+ os.environ["COQUI_TOS_AGREED"] = "1"
+
+ # NOTE: streaming requires the gradio audio streaming fix
+ # pip install --upgrade -y gradio==0.50.2 git+https://github.com/gorkemgoknar/gradio.git@patch-1
+
+ import textwrap
+ from scipy.io.wavfile import write
+ from pydub import AudioSegment
+ import gradio as gr
+ import numpy as np
+ import torch
+ import nltk  # we'll use this to split text into sentences
+ nltk.download("punkt")
+
+ import noisereduce as nr
+ import subprocess
+ import langid
+ import uuid
+ import emoji
+ import pathlib
+
+ import datetime
+ import time
+
+ import re
+ import io, wave
+ import librosa
+ import torchaudio
+ import spaces  # ZeroGPU helper; provides the @spaces.GPU decorator used on generate_local below
+ from TTS.api import TTS
+ from TTS.tts.configs.xtts_config import XttsConfig
+ from TTS.tts.models.xtts import Xtts
+ from TTS.utils.generic_utils import get_user_data_dir
+
+ from transformers import pipeline
+
+ from gradio_client import Client
+ from huggingface_hub import InferenceClient
+
+ # This will trigger downloading the model if it is not cached yet
+ print("Downloading Coqui XTTS V2 (if not already downloaded)")
+
+ from TTS.utils.manage import ModelManager
+ model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
+ ModelManager().download_model(model_name)
+ model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
+ print("XTTS downloaded")
+
+
+ print("Loading XTTS")
+ config = XttsConfig()
+ config.load_json(os.path.join(model_path, "config.json"))
+
+ model = Xtts.init_from_config(config)
+ model.load_checkpoint(
+     config,
+     checkpoint_path=os.path.join(model_path, "model.pth"),
+     vocab_path=os.path.join(model_path, "vocab.json"),
+     eval=True,
+     use_deepspeed=True,
+ )
+ model.cuda()
+ print("Done loading TTS")
+
+ ##### llm_model = os.environ.get("LLM_MODEL", "mistral")  # or "zephyr"
+
+ title = "Voice chat"
+
+ from huggingface_hub import HfApi
+
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+ # the api client is used to restart the Space on an unrecoverable error
+ api = HfApi(token=HF_TOKEN)
+
+ repo_id = "coqui/voice-chat-with-zephyr"
+
+
+ default_system_message = f"""
+ You are ##LLM_MODEL###, a large language model trained by ##LLM_MODEL_PROVIDER###; your architecture is a decoder-based LM. Your voice (text-to-speech) backend is provided via Coqui technology. You are currently served on Hugging Face Spaces.
+ Don't repeat yourself. Answer briefly, in only a few words, as if in a conversation. You cannot access the internet, but you have vast knowledge.
+ Current date: CURRENT_DATE .
+ """
+
+ system_message = os.environ.get("SYSTEM_MESSAGE", default_system_message)
+ system_message = system_message.replace("CURRENT_DATE", str(datetime.date.today()))
+
+
+ # MISTRAL ONLY
+ default_system_understand_message = (
+     "I understand, I am a ##LLM_MODEL### chatbot with speech by the Coqui team."
+ )
+ system_understand_message = os.environ.get(
+     "SYSTEM_UNDERSTAND_MESSAGE", default_system_understand_message
+ )
+
+ print("Mistral system message set as:", default_system_message)
+ WHISPER_TIMEOUT = int(os.environ.get("WHISPER_TIMEOUT", 45))
+
+ whisper_client = Client("https://sanchit-gandhi-whisper-large-v2.hf.space/")
+
+ ROLES = ["AI Assistant", "AI Beard The Pirate"]
+
+ ROLE_PROMPTS = {}
+ ROLE_PROMPTS["AI Assistant"] = system_message
+
+ # Pirate scenario
+ character_name = "AI Beard"
+ character_scenario = f"As {character_name} you are a 28-year-old man who is a pirate on the ship Invisible AI. You are good friends with Guybrush Threepwood and Murray the Skull. Developers did not get you into the Monkey Island games as you wanted huge shares of the Big Whoop treasure."
+ pirate_system_message = f"You are {character_name}. {character_scenario} Print out only exactly the words that {character_name} would speak out, do not add anything. Don't repeat yourself. Answer briefly, in only a few words, as if in a conversation. Craft your response only from the first-person perspective of {character_name} and never as the user. Current date: #CURRENT_DATE#".replace("#CURRENT_DATE#", str(datetime.date.today()))
+
+ ROLE_PROMPTS["AI Beard The Pirate"] = pirate_system_message
+ ## "You are an AI assistant with the Zephyr model by Mistral and Hugging Face and speech from Coqui XTTS. The user will give you a task. Your goal is to complete the task as faithfully as you can. While performing the task think step by step and justify your steps; your answers should be clear and short sentences."
+
+ ### WILL USE A LOCAL MISTRAL, ZEPHYR OR YI MODEL
+ ### While Zephyr and Yi each use half the GPU to fit everything into 16GB, XTTS uses at most 5GB of VRAM
+
+ from huggingface_hub import hf_hub_download
+ print("Downloading LLM")
+ print("Downloading Zephyr 7B beta")
+ # Zephyr
+ hf_hub_download(repo_id="TheBloke/zephyr-7B-beta-GGUF", local_dir=".", filename="zephyr-7b-beta.Q5_K_M.gguf")
+ zephyr_model_path = "./zephyr-7b-beta.Q5_K_M.gguf"
+
+ # print("Downloading Mistral 7B Instruct")
+ # Mistral
+ # hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.1-GGUF", local_dir=".", filename="mistral-7b-instruct-v0.1.Q5_K_M.gguf")
+ # mistral_model_path="./mistral-7b-instruct-v0.1.Q5_K_M.gguf"
+
+ # print("Downloading Yi-6B")
+ # Yi-6B
+ # Note: the current Yi is a text-generation model, not an instruct-tuned model
+ # hf_hub_download(repo_id="TheBloke/Yi-6B-GGUF", local_dir=".", filename="yi-6b.Q5_K_M.gguf")
+ # yi_model_path="./yi-6b.Q5_K_M.gguf"
+
+
+ from llama_cpp import Llama
+ # set GPU_LAYERS to 15 if you have an 8GB GPU so both models can fit;
+ # otherwise 35 (all) layers + XTTS work fine on a 16GB T4
+ # ~5GB per LLM, ~4GB for XTTS -> all layers should fit a 16GB T4: 2 LLMs + XTTS
+ GPU_LAYERS = int(os.environ.get("GPU_LAYERS", 35))
+
+ LLM_STOP_WORDS = ["</s>", "<|user|>", "/s>", "<EOT>", "[/INST]"]
+
+ LLAMA_VERBOSE = False
+ # print("Running Mistral")
+ # llm_mistral = Llama(model_path=mistral_model_path,n_gpu_layers=GPU_LAYERS,max_new_tokens=256, context_window=4096, n_ctx=4096,n_batch=128,verbose=LLAMA_VERBOSE)
+ # print("Running LLM Mistral as InferenceClient")
+ # llm_mistral = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
+
+
+ print("Running LLM Zephyr")
+ llm_zephyr = Llama(model_path=zephyr_model_path, n_gpu_layers=round(GPU_LAYERS/2), max_new_tokens=256, context_window=4096, n_ctx=4096, n_batch=128, verbose=LLAMA_VERBOSE)
+
+ # print("Running Yi LLM")
+ # llm_yi = Llama(model_path=yi_model_path,n_gpu_layers=round(GPU_LAYERS/2),max_new_tokens=256, context_window=4096, n_ctx=4096,n_batch=128,verbose=LLAMA_VERBOSE)
+
+
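+ # The prompt formatters below flatten the system message, the chat history and the
+ # new user message into a single string in each model's chat template, so the string
+ # can be fed straight to llama-cpp-python for completion.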
+ # # Mistral formatter
+ # def format_prompt_mistral(message, history, system_message=system_message, system_understand_message=system_understand_message):
+ #     prompt = (
+ #         "<s>[INST]" + system_message + "[/INST]" + system_understand_message + "</s>"
+ #     )
+ #     for user_prompt, bot_response in history:
+ #         prompt += f"[INST] {user_prompt} [/INST]"
+ #         prompt += f" {bot_response}</s> "
+
+ #     if message == "":
+ #         message = "Hello"
+ #     prompt += f"[INST] {message} [/INST]"
+ #     return prompt
+
+ def format_prompt_yi(message, history, system_message=system_message, system_understand_message=system_understand_message):
+     prompt = (
+         "<s>[INST] [SYS]\n" + system_message + "\n[/SYS]\n\n[/INST]"
+     )
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+
+     if message == "":
+         message = "Hello"
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+
+ # Example of the Zephyr chat template:
+ # <|system|>
+ # You are a friendly chatbot who always responds in the style of a pirate.</s>
+ # <|user|>
+ # How many helicopters can a human eat in one sitting?</s>
+ # <|assistant|>
+ # Ah, me hearty matey! But yer question be a puzzler! A human cannot eat a helicopter in one sitting, as helicopters are not edible. They be made of metal, plastic, and other materials, not food!
+
+ # Zephyr formatter
+ def format_prompt_zephyr(message, history, system_message=system_message):
+     prompt = (
+         "<|system|>\n" + system_message + "</s>"
+     )
+     for user_prompt, bot_response in history:
+         prompt += f"<|user|>\n{user_prompt}</s>"
+         prompt += f"<|assistant|>\n{bot_response}</s>"
+     if message == "":
+         message = "Hello"
+     prompt += f"<|user|>\n{message}</s>"
+     prompt += "<|assistant|>"
+     print(prompt)
+     return prompt
+
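+ # generate_local streams the LLM completion and yields the accumulated text after
+ # every token, so the caller can tokenize it into sentences for TTS while the model
+ # is still generating.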
+ @spaces.GPU
+ def generate_local(
+     prompt,
+     history,
+     llm_model="zephyr",
+     system_message=None,
+     temperature=0.8,
+     max_tokens=256,
+     top_p=0.95,
+     stop=LLM_STOP_WORDS,
+ ):
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_tokens=max_tokens,
+         top_p=top_p,
+         stop=stop
+     )
+
+     if "zephyr" in llm_model.lower():
+         sys_message = system_message.replace("##LLM_MODEL###", "Zephyr").replace("##LLM_MODEL_PROVIDER###", "Hugging Face")
+         formatted_prompt = format_prompt_zephyr(prompt, history, system_message=sys_message)
+         llm = llm_zephyr
+     else:
+         # NOTE: this branch needs the Mistral/Yi models and format_prompt_mistral,
+         # which are commented out above; only the Zephyr path is active in this app.
+         if "yi" in llm_model.lower():
+             llm_provider = "01.ai"
+             llm_model = "Yi"
+             llm = llm_yi
+             max_tokens = round(max_tokens/2)
+         else:
+             llm_provider = "Mistral"
+             llm_model = "Mistral"
+             llm = llm_mistral
+         sys_message = system_message.replace("##LLM_MODEL###", llm_model).replace("##LLM_MODEL_PROVIDER###", llm_provider)
+         sys_system_understand_message = system_understand_message.replace("##LLM_MODEL###", llm_model).replace("##LLM_MODEL_PROVIDER###", llm_provider)
+
+         if "yi" in llm_model.lower():
+             formatted_prompt = format_prompt_mistral(prompt, history, system_message=sys_message, system_understand_message="")
+         else:
+             formatted_prompt = format_prompt_mistral(prompt, history, system_message=sys_message, system_understand_message=sys_system_understand_message)
+
+     try:
+         print("LLM Input:", formatted_prompt)
+         if llm_model == "OTHER":
+             # The Mistral inference endpoint has long queues / wait times
+             generate_kwargs = dict(
+                 temperature=temperature,
+                 max_new_tokens=max_tokens,
+                 top_p=top_p,
+             )
+
+             stream = llm_mistral.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+             output = ""
+             for response in stream:
+                 character = response.token.text
+                 if character in LLM_STOP_WORDS:
+                     # end of context
+                     return
+
+                 if emoji.is_emoji(character):
+                     # a stray emoji carries no meaning and breaks the chat on the following lines
+                     return
+
+                 output += character
+                 yield output
+         else:
+             # Local GGUF model
+             stream = llm(
+                 formatted_prompt,
+                 **generate_kwargs,
+                 stream=True,
+             )
+             output = ""
+             for response in stream:
+                 character = response["choices"][0]["text"]
+
+                 if character in LLM_STOP_WORDS:
+                     # end of context
+                     return
+
+                 if emoji.is_emoji(character):
+                     # a stray emoji carries no meaning and breaks the chat on the following lines
+                     return
+
+                 output += response["choices"][0]["text"].replace("<|assistant|>", "").replace("<|user|>", "")
+                 yield output
+
+     except Exception as e:
+         if "Too Many Requests" in str(e):
+             print("ERROR: Too many requests on mistral client")
+             gr.Warning("Unfortunately Mistral is unable to process")
+             output = "Unfortunately I am not able to process your request now!"
+         else:
+             print("Unhandled Exception: ", str(e))
+             gr.Warning("Unfortunately Mistral is unable to process")
+             output = "I do not know what happened, but I could not understand you."
+
+     return output
+
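+ # get_latents computes the XTTS conditioning latents (GPT latent + speaker embedding)
+ # for a reference voice file, optionally running an ffmpeg cleanup filter on it first.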
+ def get_latents(speaker_wav, voice_cleanup=False):
+     if voice_cleanup:
+         try:
+             cleanup_filter = "lowpass=8000,highpass=75,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02"
+             resample_filter = "-ac 1 -ar 22050"
+             out_filename = speaker_wav + str(uuid.uuid4()) + ".wav"  # extension lets ffmpeg infer the output format
+             # a newer ffmpeg also provides the afftdn denoise filter
+             shell_command = f"ffmpeg -y -i {speaker_wav} -af {cleanup_filter} {resample_filter} {out_filename}".split(" ")
+
+             command_result = subprocess.run([item for item in shell_command], capture_output=False, text=True, check=True)
+             speaker_wav = out_filename
+             print("Filtered microphone input")
+         except subprocess.CalledProcessError:
+             # There was an error - the command exited with a non-zero code
+             print("Error: failed filtering, using original microphone input")
+     else:
+         pass  # use the original input unchanged
+
+     # wrapped in a function so voice cleanup/filtering can be plugged in here
+     (
+         gpt_cond_latent,
+         speaker_embedding,
+     ) = model.get_conditioning_latents(audio_path=speaker_wav)
+     return gpt_cond_latent, speaker_embedding
+
+ def wave_header_chunk(frame_input=b"", channels=1, sample_width=2, sample_rate=24000):
+     # This creates a wave header and appends the frame input.
+     # It should be the first chunk of a streaming wav file;
+     # later frames should not include it (otherwise you will hear artifacts at each chunk start).
+     wav_buf = io.BytesIO()
+     with wave.open(wav_buf, "wb") as vfout:
+         vfout.setnchannels(channels)
+         vfout.setsampwidth(sample_width)
+         vfout.setframerate(sample_rate)
+         vfout.writeframes(frame_input)
+
+     wav_buf.seek(0)
+     return wav_buf.read()
+
+
+ # The config holds the authoritative list of supported languages; new ones may be added there
+ ## ["en","es","fr","de","it","pt","pl","tr","ru","nl","cs","ar","zh-cn","ja"]
+
+ xtts_supported_languages = config.languages
+ # def detect_language(prompt):
+ #     # Fast language autodetection
+ #     if len(prompt) > 15:
+ #         language_predicted = langid.classify(prompt)[0].strip()  # strip needed as there is a trailing space
+ #         if language_predicted == "zh":
+ #             # we use zh-cn on xtts
+ #             language_predicted = "zh-cn"
+
+ #         if language_predicted not in xtts_supported_languages:
+ #             print(f"Detected a language not supported by xtts: {language_predicted}, switching to english for now")
+ #             gr.Warning(f"Language detected '{language_predicted}' can not be spoken properly 'yet'")
+ #             language = "en"
+ #         else:
+ #             language = language_predicted
+ #         print(f"Language: Predicted sentence language: {language_predicted}, using language for xtts: {language}")
+ #     else:
+ #         # Hard to detect the language quickly in a short sentence, default to english
+ #         language = "en"
+ #         print("Language: Prompt is short or language autodetect is disabled, using english for xtts")
+
+ #     return language
+
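+ # get_voice_streaming runs XTTS inference_stream and yields each audio chunk as raw
+ # 16-bit PCM bytes at 24 kHz, so the caller can concatenate or stream them directly.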
+ def get_voice_streaming(prompt, language, latent_tuple, suffix="0"):
+     gpt_cond_latent, speaker_embedding = latent_tuple
+
+     try:
+         t0 = time.time()
+         chunks = model.inference_stream(
+             prompt,
+             language,
+             gpt_cond_latent,
+             speaker_embedding,
+             repetition_penalty=7.0,
+             temperature=0.85,
+         )
+
+         first_chunk = True
+         for i, chunk in enumerate(chunks):
+             if first_chunk:
+                 first_chunk_time = time.time() - t0
+                 metrics_text = f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds\n"
+                 first_chunk = False
+             # print(f"Received chunk {i} of audio length {chunk.shape[-1]}")
+
+             # In case the output is required as multiple voice files:
+             # out_file = f'{char}_{i}.wav'
+             # write(out_file, 24000, chunk.detach().cpu().numpy().squeeze())
+             # audio = AudioSegment.from_file(out_file)
+             # audio.export(out_file, format='wav')
+             # return out_file
+             # directly return the chunk as bytes for streaming
+             chunk = chunk.detach().cpu().numpy().squeeze()
+             chunk = (chunk * 32767).astype(np.int16)
+
+             yield chunk.tobytes()
+
+     except RuntimeError as e:
+         if "device-side assert" in str(e):
+             # nothing can be done about a CUDA device-side assert; the Space needs a restart
+             print(
+                 f"Exit due to: Unrecoverable exception caused by prompt:{prompt}",
+                 flush=True,
+             )
+             gr.Warning("Unhandled Exception encountered, please retry in a minute")
+             print("Cuda device-assert Runtime encountered, need restart")
+
+             # HF Space specific: this error is unrecoverable, the Space must be restarted
+             api.restart_space(repo_id=repo_id)
+         else:
+             print("RuntimeError: non device-side assert error:", str(e))
+             # No warning needed; this happens on an empty chunk and at the end
+             ### gr.Warning("Unhandled Exception encountered, please retry in a minute")
+             return None
+         return None
+     except Exception:
+         return None
+
+
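+ # transcribe sends the recorded wav to the hosted Whisper large-v2 Space via
+ # gradio_client and returns the stripped transcription text.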
+ def transcribe(wav_path):
+     try:
+         # get the result from whisper and strip leading/trailing whitespace
+         return whisper_client.predict(
+             wav_path,      # str (filepath or URL to file) in 'inputs' Audio component
+             "transcribe",  # str in 'Task' Radio component
+             api_name="/predict"
+         ).strip()
+     except Exception:
+         gr.Warning("There was a problem with the Whisper endpoint, telling a joke for you.")
+         return "There was a problem with my voice, tell me a joke"
+
+
+ # Triggered on text submit (will send to generate_speech)
+ def add_text(history, text):
+     history = [] if history is None else history
+     history = history + [(text, None)]
+     return history, gr.update(value="", interactive=False)
+
+ # Triggered on voice submit (will transcribe and send to generate_speech)
+ def add_file(history, file):
+     history = [] if history is None else history
+
+     try:
+         text = transcribe(file)
+         print("Transcribed text:", text)
+     except Exception as e:
+         print(str(e))
+         gr.Warning("There was an issue with transcription, please try writing for now")
+         # Apply a fallback text on error
+         text = "Transcription seems to have failed, please tell me a joke about chickens"
+
+     history = history + [(text, None)]
+     return history, gr.update(value="", interactive=False)
+
+
+ ## NOTE: not used, as it yields a character at a time while we need to feed whole sentences to TTS
+ def bot(history, system_prompt=""):
+     history = [["", None]] if history is None else history
+
+     if system_prompt == "":
+         system_prompt = system_message
+
+     history[-1][1] = ""
+     for character in generate(history[-1][0], history[:-1]):
+         history[-1][1] = character
+         yield history
+
+
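+ # get_sentence consumes the streamed LLM output, splits the accumulated text into
+ # sentences with nltk, buffers very short leading sentences (for better language
+ # detection), and yields each newly completed sentence together with the updated history.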
+ def get_sentence(history, chatbot_role, llm_model, system_prompt=""):
+
+     history = [["", None]] if history is None else history
+
+     if system_prompt == "":
+         system_prompt = system_message
+
+     history[-1][1] = ""
+
+     mistral_start = time.time()
+
+     sentence_list = []
+     sentence_hash_list = []
+
+     text_to_generate = ""
+     stored_sentence = None
+     stored_sentence_hash = None
+
+     print(chatbot_role)
+     print(llm_model)
+
+     for character in generate_local(history[-1][0], history[:-1], system_message=ROLE_PROMPTS[chatbot_role], llm_model=llm_model):
+         history[-1][1] = character.replace("<|assistant|>", "")
+         # the text arrives a token at a time
+
+         text_to_generate = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|assistant|>", " ").replace("<|ass>", "").replace("[/ASST]", "").replace("[/ASSI]", "").replace("[/ASS]", "").replace("", "").strip())
+         if len(text_to_generate) > 1:
+
+             dif = len(text_to_generate) - len(sentence_list)
+
+             if dif == 1 and len(sentence_list) != 0:
+                 continue
+
+             if dif == 2 and len(sentence_list) != 0 and stored_sentence is not None:
+                 continue
+
+             # All this complexity comes from trying to append the first short sentence to the next one for proper language auto-detection
+             if stored_sentence is not None and stored_sentence_hash is None and dif > 1:
+                 # means we consumed the stored sentence and should look at the next sentence to generate
+                 sentence = text_to_generate[len(sentence_list)+1]
+             elif stored_sentence is not None and len(text_to_generate) > 2 and stored_sentence_hash is not None:
+                 print("Appending stored")
+                 sentence = stored_sentence + text_to_generate[len(sentence_list)+1]
+                 stored_sentence_hash = None
+             else:
+                 sentence = text_to_generate[len(sentence_list)]
+
+             # a too-short sentence is just appended to the next one, if there is any
+             # (this is for proper language detection)
+             if len(sentence) <= 15 and stored_sentence_hash is None and stored_sentence is None:
+                 if sentence[-1] in [".", "!", "?"]:
+                     if stored_sentence_hash != hash(sentence):
+                         stored_sentence = sentence
+                         stored_sentence_hash = hash(sentence)
+                         print("Storing:", stored_sentence)
+                         continue
+
+
+             sentence_hash = hash(sentence)
+             if stored_sentence_hash is not None and sentence_hash == stored_sentence_hash:
+                 continue
+
+             if sentence_hash not in sentence_hash_list:
+                 sentence_hash_list.append(sentence_hash)
+                 sentence_list.append(sentence)
+                 print("New Sentence: ", sentence)
+                 yield (sentence, history)
+
+     # yield the final sentence
+     try:
+         last_sentence = nltk.sent_tokenize(history[-1][1].replace("\n", " ").replace("<|ass>", "").replace("[/ASST]", "").replace("[/ASSI]", "").replace("[/ASS]", "").replace("", "").strip())[-1]
+         sentence_hash = hash(last_sentence)
+         if sentence_hash not in sentence_hash_list:
+             if stored_sentence is not None and stored_sentence_hash is not None:
+                 last_sentence = stored_sentence + last_sentence
+                 stored_sentence = stored_sentence_hash = None
+                 print("Last Sentence with stored:", last_sentence)
+
+             sentence_hash_list.append(sentence_hash)
+             sentence_list.append(last_sentence)
+             print("Last Sentence: ", last_sentence)
+
+             yield (last_sentence, history)
+     except Exception:
+         print("ERROR on last sentence; history is:", history)
+
+
+ second_of_silence = AudioSegment.silent()  # use the default duration
+ second_of_silence.export("sil.wav", format='wav')
+
+
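+ # generate_speech is the driver generator wired to the Gradio events: it first yields
+ # a bare WAV header so the streaming audio component starts playing, then for every
+ # sentence produced by get_sentence it synthesizes audio and yields it as bytes.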
+ def generate_speech(history, chatbot_role, llm_model):
+     # Autoplay must be enabled first, hence the initial wave header yield
+     yield (history, chatbot_role, "", wave_header_chunk())
+     for sentence, history in get_sentence(history, chatbot_role, llm_model):
+         if sentence != "":
+             print("BG: inserting sentence to queue")
+
+             generated_speech = generate_speech_for_sentence(history, chatbot_role, sentence, return_as_byte=True)
+             if generated_speech is not None:
+                 _, audio_dict = generated_speech
+                 # We are using byte streaming
+                 yield (history, chatbot_role, sentence, audio_dict["value"])
+
+
+ # Generates the speech audio for a single sentence
+ def generate_speech_for_sentence(history, chatbot_role, sentence, return_as_byte=False):
+     language = "autodetect"
+
+     wav_bytestream = b""
+
+     if len(sentence) == 0:
+         print("EMPTY SENTENCE")
+         return
+
+     # The prompt token </s> sometimes leaks into the output; remove it.
+     # Some post-processing for speech only:
+     sentence = sentence.replace("</s>", "")
+     # remove code blocks from speech
+     sentence = re.sub(r"```.*```", "", sentence, flags=re.DOTALL)
+     sentence = re.sub(r"`.*`", "", sentence, flags=re.DOTALL)
+
+     sentence = re.sub(r"\(.*\)", "", sentence, flags=re.DOTALL)
+
+     sentence = sentence.replace("```", "")
+     sentence = sentence.replace("...", " ")
+     sentence = sentence.replace("(", " ")
+     sentence = sentence.replace(")", " ")
+     sentence = sentence.replace("<|assistant|>", "")
+
+     if len(sentence) == 0:
+         print("EMPTY SENTENCE after processing")
+         return
+
+     # A fast fix for the last character; may produce weird sounds if it is attached to text
+     # if (sentence[-1] in ["!", "?", ".", ","]) or (sentence[-2] in ["!", "?", ".", ","]):
+     #     # just add a space
+     #     sentence = sentence[:-1] + " " + sentence[-1]
+
+     # the regex does the job well
+     sentence = re.sub(r"([^\x00-\x7F]|\w)(\.|\。|\?|\!)", r"\1 \2\2", sentence)
+
+     print("Sentence for speech:", sentence)
+
+
+     try:
+         SENTENCE_SPLIT_LENGTH = 350
+         if len(sentence) < SENTENCE_SPLIT_LENGTH:
+             # no problem, continue on
+             sentence_list = [sentence]
+         else:
+             # nltk has likely split sentences properly up to here, but we need an additional
+             # check for longer sentences and must split at the last possible position.
+             # Do whatever is necessary: first break at hyphens, then spaces, and then even split very long words
+             sentence_list = textwrap.wrap(sentence, SENTENCE_SPLIT_LENGTH)
+             print("SPLIT LONG SENTENCE:", sentence_list)
+
+         for sentence in sentence_list:
+
+             if any(c.isalnum() for c in sentence):
+                 if language == "autodetect":
+                     # autodetect on the first call; subsequent sentence calls will use the same language
+                     language = "en"  # detect_language(sentence)
+
+                 # at least one alphanumeric (utf-8) character exists
+                 audio_stream = get_voice_streaming(
+                     sentence, language, latent_map[chatbot_role]
+                 )
+             else:
+                 # likely got a ' or " or some other text without an alphanumeric character in it
+                 audio_stream = None
+
+             # XTTS itself streams its response, but we are playing audio sentence by sentence.
+             # If you want direct XTTS voice streaming (send each chunk to the player) you may set the DIRECT_STREAM=1 environment variable
+             if audio_stream is not None:
+                 frame_length = 0
+                 for chunk in audio_stream:
+                     try:
+                         wav_bytestream += chunk
+                         frame_length += len(chunk)
+                     except Exception:
+                         # hack to keep playing: sometimes the last chunk is empty; it will be fixed on the next TTS call
+                         continue
+
+         # Filter the output for a better voice
+         filter_output = False
+         if filter_output:
+             data_s16 = np.frombuffer(wav_bytestream, dtype=np.int16, count=len(wav_bytestream)//2, offset=0)
+             float_data = data_s16 * 0.5**15
+             reduced_noise = nr.reduce_noise(y=float_data, sr=24000, prop_decrease=0.8, n_fft=1024)
+             wav_bytestream = (reduced_noise * 32767).astype(np.int16)
+             wav_bytestream = wav_bytestream.tobytes()
+
+         if audio_stream is not None:
+             if not return_as_byte:
+                 audio_unique_filename = "/tmp/" + str(uuid.uuid4()) + ".wav"
+                 with wave.open(audio_unique_filename, "w") as f:
+                     f.setnchannels(1)
+                     # 2 bytes per sample
+                     f.setsampwidth(2)
+                     f.setframerate(24000)
+                     f.writeframes(wav_bytestream)
+
+                 return (history, gr.Audio.update(value=audio_unique_filename, autoplay=True))
+             else:
+                 return (history, gr.Audio.update(value=wav_bytestream, autoplay=True))
+     except RuntimeError as e:
+         if "device-side assert" in str(e):
+             # nothing can be done about a CUDA device-side assert; the Space needs a restart
+             print(
+                 f"Exit due to: Unrecoverable exception caused by prompt:{sentence}",
+                 flush=True,
+             )
+             gr.Warning("Unhandled Exception encountered, please retry in a minute")
+             print("Cuda device-assert Runtime encountered, need restart")
+
+             # HF Space specific: this error is unrecoverable, the Space must be restarted
+             api.restart_space(repo_id=repo_id)
+         else:
+             print("RuntimeError: non device-side assert error:", str(e))
+             raise e
+
+     print("All speech ended")
+     return
+
+ latent_map = {}
+ latent_map["AI Assistant"] = get_latents("examples/female.wav")
+ latent_map["AI Beard The Pirate"] = get_latents("examples/pirate_by_coqui.wav")
+
+ #### GRADIO INTERFACE ####
+
+ EXAMPLES = [
+     [[], "AI Assistant", "Write a 100-word article on 'Benefits of Open-Source in AI research'"],
+ ]
+
+ # MODELS = ["Zephyr 7B Beta","Mistral 7B Instruct"]
+ MODELS = ["Zephyr 7B Beta"]
+ OTHER_HTML = ""
+
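+ # UI wiring: a submitted text message or a finished microphone recording adds a user
+ # turn via add_text/add_file, then generate_speech streams the reply sentence by
+ # sentence into the chatbot, the hidden sentence box and the streaming audio player.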
+ with gr.Blocks(title=title) as demo:
+
+     with gr.Row():
+         model_selected = gr.Dropdown(
+             label="LLM Model",
+             # info="Mistral, Zephyr: Mistral uses an inference endpoint, Zephyr is a 5-bit GGUF",
+             choices=MODELS,
+             max_choices=1,
+             value=MODELS[0],
+             visible=False,
+         )
+     chatbot = gr.Chatbot(
+         [],
+         elem_id="chatbot",
+         # avatar_images=("examples/hf-logo.png", "examples/coqui-logo.png"),
+         bubble_full_width=False,
+     )
+     with gr.Row():
+         chatbot_role = gr.Dropdown(
+             label="Role of the Chatbot",
+             info="How the chatbot should talk",
+             choices=ROLES,
+             max_choices=1,
+             value=ROLES[0],
+             visible=False,
+         )
+     with gr.Row():
+         txt = gr.Textbox(
+             scale=3,
+             show_label=False,
+             placeholder="Enter text and press enter, or speak to your microphone",
+             container=False,
+             interactive=True,
+         )
+         txt_btn = gr.Button(value="Submit text", scale=1)
+         btn = gr.Audio(source="microphone", type="filepath", scale=4)
+
+     def stop():
+         print("Audio STOP")
+         # set_audio_playing(False)  # not defined in this file; left disabled to avoid a NameError
+
+     with gr.Row():
+         sentence = gr.Textbox(visible=False)
+         audio = gr.Audio(
+             value=None,
+             label="Generated audio response",
+             streaming=True,
+             autoplay=True,
+             interactive=False,
+             show_label=True,
+             visible=False,
+         )
+
+     audio.end(stop)
+
+     with gr.Row():
+         gr.Examples(
+             EXAMPLES,
+             [chatbot, chatbot_role, txt],
+             [chatbot, chatbot_role, txt],
+             add_text,
+             cache_examples=False,
+             run_on_click=False,  # Will not run on click; the user should submit it
+         )
+
+     def clear_inputs(chatbot):
+         return None
+     clear_btn = gr.ClearButton([chatbot, audio])
+     chatbot_role.change(fn=clear_inputs, inputs=[chatbot], outputs=[chatbot])
+     model_selected.change(fn=clear_inputs, inputs=[chatbot], outputs=[chatbot])
+
+     txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         generate_speech, [chatbot, chatbot_role, model_selected], [chatbot, chatbot_role, sentence, audio]
+     )
+
+     txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
+
+     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         generate_speech, [chatbot, chatbot_role, model_selected], [chatbot, chatbot_role, sentence, audio]
+     )
+
+     txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)
+
+     file_msg = btn.stop_recording(
+         add_file, [chatbot, btn], [chatbot, txt], queue=False
+     ).then(
+         generate_speech, [chatbot, chatbot_role, model_selected], [chatbot, chatbot_role, sentence, audio]
+     )
+
+     file_msg.then(lambda: (gr.update(interactive=True), gr.update(interactive=True, value=None)), None, [txt, btn], queue=False)
+
+
+ demo.queue()
+ demo.launch(debug=True, share=True)