import contextlib
import gradio as gr
import json
import librosa
import os
import soundfile as sf
import tempfile
import uuid
import requests

import torch
import transformers

from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchMultiTaskAED
from nemo.collections.asr.parts.utils.transcribe_utils import get_buffered_pred_feat_multitaskAED
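# FrameBatchMultiTaskAED and get_buffered_pred_feat_multitaskAED implement
# NeMo's chunked ("buffered") inference for long audio with AED models like Canary.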

HF_TOKEN = os.environ.get('HF_TOKEN') # used to authenticate the Inference API call in txt2speech()

SAMPLE_RATE = 16000 # Hz
MAX_AUDIO_MINUTES = 10 # won't try to transcribe if longer than this

# load the Canary-1b model (ASR + speech translation between en/es/fr/de)
model = ASRModel.from_pretrained("nvidia/canary-1b")
model.eval()

# make sure beam size is always 1 for consistency
model.change_decoding_strategy(None)
decoding_cfg = model.cfg.decoding
decoding_cfg.beam.beam_size = 1
model.change_decoding_strategy(decoding_cfg)

# setup for buffered inference
model.cfg.preprocessor.dither = 0.0
model.cfg.preprocessor.pad_to = 0

feature_stride = model.cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * 8 # FastConformer's model stride (8x downsampling)

frame_asr = FrameBatchMultiTaskAED(
	asr_model=model,
	frame_len=40.0,
	total_buffer=40.0,
	batch_size=16,
)
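# NOTE: frame_len == total_buffer == 40.0 s means each chunk is decoded without
# extra left/right context; batch_size sets how many chunks are decoded at once.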

amp_dtype = torch.float16 # dtype for mixed-precision (autocast) buffered inference


llm_model = transformers.AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct", 
    device_map="auto", 
    torch_dtype="auto", 
    trust_remote_code=True, 
)
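# device_map="auto" places the model on a GPU when one is available (this needs
# the `accelerate` package); torch_dtype="auto" keeps the checkpoint's dtype.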

generation_args = {
    "max_new_tokens": 500,
    # return only the newly generated answer so the TTS step doesn't read the
    # user's question back as part of the reply
    "return_full_text": False,
    "temperature": 0.0,  # unused when do_sample=False (greedy decoding)
    "do_sample": False,
}


tokenizer = transformers.AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")

llm_pipe = transformers.pipeline(
    "text-generation",
    model=llm_model,
    tokenizer=tokenizer,
)
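# the pipeline returns a list with one dict per input; the generated answer is
# read from its 'generated_text' key in main() below.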

def convert_audio(audio_filepath, tmpdir, utt_id):
	"""
	Convert all files to monochannel 16 kHz wav files.
	Do not convert and raise error if audio too long.
	Returns output filename and duration.
	"""

	data, sr = librosa.load(audio_filepath, sr=None, mono=True)

	duration = librosa.get_duration(y=data, sr=sr)

	if duration / 60.0 > MAX_AUDIO_MINUTES:
		raise gr.Error(
			f"This demo can transcribe up to {MAX_AUDIO_MINUTES} minutes of audio. "
			"If you wish, you may trim the audio using the Audio viewer in Step 1 "
			"(click on the scissors icon to start trimming audio)."
		)

	if sr != SAMPLE_RATE:
		data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)

	out_filename = os.path.join(tmpdir, utt_id + '.wav')

	# save output audio
	sf.write(out_filename, data, SAMPLE_RATE)

	return out_filename, duration
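
# illustrative example (hypothetical values):
# convert_audio("speech.flac", tmpdir, "utt1") -> ("<tmpdir>/utt1.wav", 12.3)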


def transcribe(audio_filepath, src_lang, tgt_lang, pnc):

	if audio_filepath is None:
		raise gr.Error("Please provide some input audio: either upload an audio file or use the microphone")
	
	utt_id = uuid.uuid4()
	with tempfile.TemporaryDirectory() as tmpdir:
		converted_audio_filepath, duration = convert_audio(audio_filepath, tmpdir, str(utt_id))

		# map src_lang and tgt_lang from long versions to short
		LANG_LONG_TO_LANG_SHORT = {
			"English": "en",
			"Spanish": "es",
			"French": "fr",
			"German": "de",
		}
		if src_lang not in LANG_LONG_TO_LANG_SHORT:
			raise ValueError(f"src_lang must be one of {list(LANG_LONG_TO_LANG_SHORT)}")
		src_lang = LANG_LONG_TO_LANG_SHORT[src_lang]

		if tgt_lang not in LANG_LONG_TO_LANG_SHORT:
			raise ValueError(f"tgt_lang must be one of {list(LANG_LONG_TO_LANG_SHORT)}")
		tgt_lang = LANG_LONG_TO_LANG_SHORT[tgt_lang]
		

		# infer taskname from src_lang and tgt_lang
		if src_lang == tgt_lang:
			taskname = "asr"
		else:
			taskname = "s2t_translation"

		# update pnc variable to be "yes" or "no"
		pnc = "yes" if pnc else "no"

		# make manifest file and save
		manifest_data = {
			"audio_filepath": converted_audio_filepath,
			"source_lang": src_lang,
			"target_lang": tgt_lang,
			"taskname": taskname,
			"pnc": pnc,
			"answer": "predict",
			"duration": str(duration),
		}

		manifest_filepath = os.path.join(tmpdir, f'{utt_id}.json')
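		# NeMo expects a JSON-lines manifest: one JSON object per line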

		with open(manifest_filepath, 'w') as fout:
			line = json.dumps(manifest_data)
			fout.write(line + '\n')

		# call transcribe, passing in manifest filepath
		if duration < 40: # fits within the 40 s buffer, so transcribe in one pass
			output_text = model.transcribe(manifest_filepath)[0]
		else: # do buffered inference
			# use mixed precision only when CUDA is available; otherwise fall back to a no-op context
			amp_ctx = torch.cuda.amp.autocast(dtype=amp_dtype) if torch.cuda.is_available() else contextlib.nullcontext()
			with amp_ctx:
				with torch.no_grad():
					hyps = get_buffered_pred_feat_multitaskAED(
						frame_asr,
						model.cfg.preprocessor,
						model_stride_in_secs,
						model.device,
						manifest=manifest_filepath,
						filepaths=None,
					)

					output_text = hyps[0].text

	return output_text

# add logic to make sure dropdown menus only suggest valid combos
def on_src_or_tgt_lang_change(src_lang_value, tgt_lang_value, pnc_value):
	"""Callback function for when src_lang or tgt_lang dropdown menus are changed.
	Args:
		src_lang_value(string), tgt_lang_value (string), pnc_value(bool) - the current 
			chosen "values" of each Gradio component
	Returns:
		src_lang, tgt_lang, pnc - these are the new Gradio components that will be displayed
	"""

	if src_lang_value == "English" and tgt_lang_value == "English":
		# src_lang and tgt_lang can go anywhere
		src_lang = gr.Dropdown(
			choices=["English", "Spanish", "French", "German"],
			value=src_lang_value,
			label="Input audio is spoken in:"
		)
		tgt_lang = gr.Dropdown(
			choices=["English", "Spanish", "French", "German"],
			value=tgt_lang_value,
			label="Transcribe in language:"
		)
	elif src_lang_value == "English": 
		# src is English & tgt is non-English
		# => src can only be English or the current tgt_lang_value
		# & tgt can be anything
		src_lang = gr.Dropdown(
			choices=["English", tgt_lang_value],
			value=src_lang_value,
			label="Input audio is spoken in:"
		)
		tgt_lang = gr.Dropdown(
			choices=["English", "Spanish", "French", "German"],
			value=tgt_lang_value,
			label="Transcribe in language:"
		)
	elif tgt_lang_value == "English": 
		# src is non-English & tgt is English
		# => src can be anything
		# & tgt can only be English or current src_lang_value
		src_lang = gr.Dropdown(
			choices=["English", "Spanish", "French", "German"],
			value=src_lang_value,
			label="Input audio is spoken in:"
		)
		tgt_lang = gr.Dropdown(
			choices=["English", src_lang_value],
			value=tgt_lang_value,
			label="Transcribe in language:"
		)
	else:
		# both src and tgt are non-English
		# => both src and tgt can only be switched to English or stay as they are
		src_lang = gr.Dropdown(
			choices=["English", src_lang_value],
			value=src_lang_value,
			label="Input audio is spoken in:"
		)
		tgt_lang = gr.Dropdown(
			choices=["English", tgt_lang_value],
			value=tgt_lang_value,
			label="Transcribe in language:"
		)
	# let pnc be anything if src_lang_value == tgt_lang_value, else fix to True
	if src_lang_value == tgt_lang_value:
		pnc = gr.Checkbox(
			value=pnc_value,
			label="Punctuation & Capitalization in transcript?",
			interactive=True
		)
	else:
		pnc = gr.Checkbox(
			value=True,
			label="Punctuation & Capitalization in transcript?",
			interactive=False
		)
	return src_lang, tgt_lang, pnc

def txt2speech(text):
    API_URL = "https://api-inference.huggingface.co/models/espnet/kan-bayashi_ljspeech_vits"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    payloads = {'inputs': text}

    response = requests.post(API_URL, headers=headers, json=payloads)
    # fail loudly on errors (e.g. a 503 while the hosted model is still loading)
    # rather than writing an error body into the audio file
    response.raise_for_status()

    with open('audio_out.mp3', 'wb') as file:
        file.write(response.content)

def main(audio_filepath, src_lang, tgt_lang, pnc):
    translated = transcribe(audio_filepath, src_lang, tgt_lang, pnc)
    answer = llm_pipe(translated, **generation_args)
    txt2speech(answer[0]['generated_text'])
    return 'audio_out.mp3'

with gr.Blocks(
	title="MyAlexa",
	css="""
		textarea { font-size: 18px;}
		#model_output_text_box span {
			font-size: 18px;
			font-weight: bold;
		}
	""",
	theme=gr.themes.Default(text_size=gr.themes.sizes.text_lg) # make text slightly bigger (default is text_md)
) as demo:

	gr.HTML("<h1 style='text-align: center'>MyAlexa</h1>")

	with gr.Row():
		with gr.Column():
			gr.HTML(
				"<p>Upload an audio file or record with your microphone.</p>"
			)

			audio_file = gr.Audio(sources=["microphone", "upload"], type="filepath")

			gr.HTML("<p>Choose the input and output language.</p>")

			src_lang = gr.Dropdown(
				choices=["English", "Spanish", "French", "German"],
				value="English",
				label="Input audio is spoken in:"
			)

			with gr.Column():
				tgt_lang = gr.Dropdown(
					choices=["English", "Spanish", "French", "German"],
					value="English",
					label="Transcribe in language:"
				)
				pnc = gr.Checkbox(
					value=True,
					label="Punctuation & Capitalization in transcript?",
				)

		with gr.Column():

			gr.HTML("<p>Run the model.</p>")

			go_button = gr.Button(
				value="Run model",
				variant="primary", # make "primary" so it stands out (default is "secondary")
			)

			# type="filepath" lets main() return the path of the mp3 written by txt2speech()
			audio_out = gr.Audio(label="Generated Audio", type="filepath", elem_id="audio_out", interactive=False)

	go_button.click(
		fn=main,
		inputs=[audio_file, src_lang, tgt_lang, pnc],
		outputs=[audio_out]
	)

	# call on_src_or_tgt_lang_change whenever src_lang or tgt_lang dropdown menus are changed	
	src_lang.change(
		fn=on_src_or_tgt_lang_change,
		inputs=[src_lang, tgt_lang, pnc],
		outputs=[src_lang, tgt_lang, pnc],
	)
	tgt_lang.change(
		fn=on_src_or_tgt_lang_change,
		inputs=[src_lang, tgt_lang, pnc],
		outputs=[src_lang, tgt_lang, pnc],
	)
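
# NOTE: txt2speech() writes every result to the same fixed 'audio_out.mp3' path,
# so concurrent requests could overwrite each other's output.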


demo.queue()
demo.launch()