seba3y committed on
Commit
3be22dd
1 Parent(s): f7e2294

Delete audio.py

Files changed (1)
  1. audio.py +0 -105
audio.py DELETED
@@ -1,105 +0,0 @@
from transformers import AutoFeatureExtractor, WhisperForAudioClassification
import torch
# import librosa


device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# device = 'cpu'
print('Run on:', device)

SAMPLING_RATE = 16000
MAX_LENGTH = SAMPLING_RATE * 10  # 10 seconds == 160,000 samples per segment

# Fine-tuned whisper-tiny classifiers hosted on the Hub
fluency_model_name = 'seba3y/whisper-tiny-fluency'  # future use
acc_model_name = 'seba3y/whisper-tiny-accuracy'

fluency_feature = AutoFeatureExtractor.from_pretrained(fluency_model_name)
fluency_model = WhisperForAudioClassification.from_pretrained(fluency_model_name).to(device)
acc_feature = AutoFeatureExtractor.from_pretrained(acc_model_name)
acc_model = WhisperForAudioClassification.from_pretrained(acc_model_name).to(device)


def load_audio_from_path(audio, feature_extractor, max_length=MAX_LENGTH):
    # Accepts a Gradio-style (sample_rate, waveform) tuple; the sample rate is
    # discarded because the waveform is assumed to already be at SAMPLING_RATE.
    # audio, _ = librosa.load(file_path, sr=SAMPLING_RATE)
    _, audio = audio
    audio_length = len(audio)
    # Split the audio into max_length chunks if it is longer than max_length
    segments = []
    for start in range(0, audio_length, max_length):
        end = min(start + max_length, audio_length)
        segment = audio[start:end]
        inputs = feature_extractor(segment,
                                   sampling_rate=SAMPLING_RATE,
                                   return_tensors='pt',
                                   max_length=max_length,
                                   padding='max_length').input_features
        segments.append(inputs)
    return segments


@torch.no_grad()
def model_generate(inputs, model):
    # Inference-only forward pass; [0] selects the classification logits.
    logits = model(inputs.to(device))[0]
    return logits


def postprocess(logits, model, noise=1):
    # `noise` is a hand-tuned per-class weight vector (the +0.9 offset is part
    # of the same tuning) used to bias the distribution before the softmax.
    logits = noise * (logits.cpu() + 0.9)
    scores = logits.softmax(-1)[0]
    # print(scores)  # debug
    ids = torch.argmax(scores, dim=-1).item()
    scores = scores.tolist()
    label = model.config.id2label[ids]
    return label, round(scores[ids], 2)


def predict(segments, model, noise):
    all_logits = []
    for segment in segments:
        logits = model_generate(segment, model)
        all_logits.append(logits)
    # Aggregate the per-segment results (simple average)
    avg_logits = torch.mean(torch.stack(all_logits), dim=0)
    return postprocess(avg_logits, model, noise)


def predict_accuracy(audio):
    Anoise = torch.tensor([100.618, 0.0118, 10.945, 30.419])
    segments = load_audio_from_path(audio, acc_feature)
    return predict(segments, acc_model, Anoise)


def predict_fluency(audio):
    Fnoise = torch.tensor([5.618, 4.518, 2.145, 0.219])
    segments = load_audio_from_path(audio, fluency_feature)
    return predict(segments, fluency_model, Fnoise)


def predict_all(audio):
    Anoise = torch.tensor([5.618, 1.518, 10.945, 100.419])
    Fnoise = torch.tensor([3.618, 5.518, 3.045, 0.49])
    # The two whisper-tiny extractors are interchangeable here, so the same
    # features are reused for both models.
    segments = load_audio_from_path(audio, acc_feature)
    acc = predict(segments, acc_model, Anoise)
    fle = predict(segments, fluency_model, Fnoise)
    return acc, fle


if __name__ == '__main__':
    import librosa  # needed only for this local smoke test

    file_path = r'uploads\audio.wav'
    print('start')
    waveform, _ = librosa.load(file_path, sr=SAMPLING_RATE)
    result = predict_fluency((SAMPLING_RATE, waveform))
    print('done')
    # print('Fluency of the speech:')
    # print('=' * 25)
    # print(result)
    # print('Pronunciation Accuracy of the speech:')
    # print('=' * 25)
    # result = predict_accuracy((SAMPLING_RATE, waveform))
    # print(result)
    # label, score = result
    # print('Prediction:', label, '\nConfidence:', round(score * 100), '%')
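
For context, a minimal sketch of how the module removed here could be driven from another script, assuming it is saved as audio.py on the import path. The sine tone is a synthetic placeholder for real speech, and the (sample_rate, waveform) tuple mirrors what a Gradio Audio component passes in; none of this is part of the original commit.

# Hypothetical usage sketch; the placeholder waveform is an assumption.
import numpy as np
from audio import predict_all, SAMPLING_RATE

# Three seconds of a 220 Hz sine tone as stand-in "speech".
t = np.linspace(0, 3, 3 * SAMPLING_RATE, endpoint=False, dtype=np.float32)
waveform = 0.1 * np.sin(2 * np.pi * 220 * t)

# predict_all() takes a Gradio-style (sample_rate, waveform) tuple and returns
# one (label, confidence) pair per model.
accuracy, fluency = predict_all((SAMPLING_RATE, waveform))
print('Pronunciation accuracy:', accuracy)
print('Fluency:', fluency)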