import torch
import json
import os
import WordMatching as wm
import utilsFileIO
import pronunciationTrainer
import base64
import time
import audioread
import numpy as np
from torchaudio.transforms import Resample
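
# Module-level setup: one pronunciation trainer per supported language ('de',
# 'en') and a fixed 48 kHz -> 16 kHz resampler. Objects created here are
# initialised once per process/container and reused across invocations.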
trainer_SST_lambda = {}
trainer_SST_lambda['de'] = pronunciationTrainer.getTrainer("de")
trainer_SST_lambda['en'] = pronunciationTrainer.getTrainer("en")
transform = Resample(orig_freq=48000, new_freq=16000)
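

# Expected request body (inferred from the parsing below, so treat the exact
# client-side format as an assumption): a JSON object with 'title' (the
# reference text), 'base64Audio' (a data-URL such as
# 'data:audio/ogg;base64,<payload>') and 'language' ('de' or 'en').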
def lambda_handler(event, context):
    data = json.loads(event['body'])

    real_text = data['title']
    # Strip the data-URL prefix (the first 22 characters, e.g.
    # 'data:audio/ogg;base64,') before decoding the base64 payload.
    file_bytes = base64.b64decode(
        data['base64Audio'][22:].encode('utf-8'))
    language = data['language']

    # No reference text to score against: return an empty CORS-enabled response.
    if len(real_text) == 0:
        return {
            'statusCode': 200,
            'headers': {
                'Access-Control-Allow-Headers': '*',
                'Access-Control-Allow-Credentials': "true",
                'Access-Control-Allow-Origin': '*',
                'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
            },
            'body': ''
        }

    # Write the decoded audio to a temporary .ogg file on disk.
    start = time.time()
    random_file_name = './' + utilsFileIO.generateRandomString() + '.ogg'
    with open(random_file_name, 'wb') as f:
        f.write(file_bytes)
    print('Time for saving binary in file: ', str(time.time() - start))

    # Decode the .ogg file and resample it (the Resample transform above
    # assumes 48 kHz recordings) before handing it to the trainer.
    start = time.time()
    signal, fs = audioread_load(random_file_name)
    signal = transform(torch.Tensor(signal)).unsqueeze(0)
    print('Time for loading .ogg file: ', str(time.time() - start))

    result = trainer_SST_lambda[language].processAudioForGivenText(
        signal, real_text)

    start = time.time()
    os.remove(random_file_name)
    print('Time for deleting file: ', str(time.time() - start))

    # Flatten the (reference word, transcribed word) pairs into space-separated
    # strings, both in plain text and in IPA.
    start = time.time()
    real_transcripts_ipa = ' '.join(
        [word[0] for word in result['real_and_transcribed_words_ipa']])
    matched_transcripts_ipa = ' '.join(
        [word[1] for word in result['real_and_transcribed_words_ipa']])

    real_transcripts = ' '.join(
        [word[0] for word in result['real_and_transcribed_words']])
    matched_transcripts = ' '.join(
        [word[1] for word in result['real_and_transcribed_words']])

    words_real = real_transcripts.lower().split()
    mapped_words = matched_transcripts.split()

    # Concatenate per-letter correctness flags for each reference word,
    # with the words separated by spaces.
    is_letter_correct_all_words = ''
    for idx, word_real in enumerate(words_real):
        mapped_letters, mapped_letters_indices = wm.get_best_mapped_words(
            mapped_words[idx], word_real)

        is_letter_correct = wm.getWhichLettersWereTranscribedCorrectly(
            word_real, mapped_letters)  # , mapped_letters_indices)

        is_letter_correct_all_words += ''.join([str(is_correct)
                                                for is_correct in is_letter_correct]) + ' '

    pair_accuracy_category = ' '.join(
        [str(category) for category in result['pronunciation_categories']])
    print('Time to post-process results: ', str(time.time() - start))

    res = {'real_transcript': result['recording_transcript'],
           'ipa_transcript': result['recording_ipa'],
           'pronunciation_accuracy': str(int(result['pronunciation_accuracy'])),
           'real_transcripts': real_transcripts, 'matched_transcripts': matched_transcripts,
           'real_transcripts_ipa': real_transcripts_ipa, 'matched_transcripts_ipa': matched_transcripts_ipa,
           'pair_accuracy_category': pair_accuracy_category,
           'start_time': result['start_time'],
           'end_time': result['end_time'],
           'is_letter_correct_all_words': is_letter_correct_all_words}

    # Note: unlike the empty-text branch above, the result is returned as a raw
    # JSON string rather than an API Gateway response envelope.
    return json.dumps(res)

# From Librosa
def audioread_load(path, offset=0.0, duration=None, dtype=np.float32):
    """Load an audio buffer using audioread.

    This loads one block at a time, and then concatenates the results.
    """
    y = []
    with audioread.audio_open(path) as input_file:
        sr_native = input_file.samplerate
        n_channels = input_file.channels

        s_start = int(np.round(sr_native * offset)) * n_channels

        if duration is None:
            s_end = np.inf
        else:
            s_end = s_start + (int(np.round(sr_native * duration)) * n_channels)

        n = 0

        for frame in input_file:
            frame = buf_to_float(frame, dtype=dtype)
            n_prev = n
            n = n + len(frame)

            if n < s_start:
                # offset is after the current frame
                # keep reading
                continue

            if s_end < n_prev:
                # we're off the end. stop reading
                break

            if s_end < n:
                # the end is in this frame. crop.
                frame = frame[: s_end - n_prev]

            if n_prev <= s_start <= n:
                # beginning is in this frame
                frame = frame[(s_start - n_prev):]

            # tack on the current frame
            y.append(frame)

    if y:
        y = np.concatenate(y)
        if n_channels > 1:
            y = y.reshape((-1, n_channels)).T
    else:
        y = np.empty(0, dtype=dtype)

    return y, sr_native

# From Librosa
def buf_to_float(x, n_bytes=2, dtype=np.float32):
    """Convert an integer buffer to floating point values.

    This is primarily useful when loading integer-valued wav data
    into numpy arrays.

    Parameters
    ----------
    x : np.ndarray [dtype=int]
        The integer-valued data buffer

    n_bytes : int [1, 2, 4]
        The number of bytes per sample in ``x``

    dtype : numeric type
        The target output type (default: 32-bit float)

    Returns
    -------
    x_float : np.ndarray [dtype=float]
        The input data buffer cast to floating point
    """
    # Invert the scale of the data
    scale = 1.0 / float(1 << ((8 * n_bytes) - 1))

    # Construct the format string
    fmt = "<i{:d}".format(n_bytes)

    # Rescale and format the data buffer
    return scale * np.frombuffer(x, fmt).astype(dtype)
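

# Minimal local smoke test (a sketch, not part of the original handler): build
# a fake API Gateway-style event from a local recording and call lambda_handler
# directly. The file name 'example.ogg' and the reference text are placeholders.
if __name__ == '__main__':
    with open('example.ogg', 'rb') as audio_file:
        encoded = base64.b64encode(audio_file.read()).decode('utf-8')
    fake_event = {'body': json.dumps({
        'title': 'Hallo, wie geht es dir?',
        'base64Audio': 'data:audio/ogg;base64,' + encoded,
        'language': 'de'})}
    print(lambda_handler(fake_event, None))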