Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -7,24 +7,30 @@ openai.api_key = os.environ["OPENAI_API_KEY"]
 
 model = whisper.load_model("small")
 
+
 def transcribe(audio):
+    model = whisper.load_model("base")
+    result = model.transcribe(audio_file)
+    return result["text"]
+
+# def transcribe(audio):
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+#     #time.sleep(3)
+#     # load audio and pad/trim it to fit 30 seconds
+#     audio = whisper.load_audio(audio)
+#     audio = whisper.pad_or_trim(audio)
+
+#     # make log-Mel spectrogram and move to the same device as the model
+#     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+#     # detect the spoken language
+#     _, probs = model.detect_language(mel)
+#     print(f"Detected language: {max(probs, key=probs.get)}")
+
+#     # decode the audio
+#     options = whisper.DecodingOptions(fp16 = False)
+#     result = whisper.decode(model, mel, options)
+#     return result.text
 
 
 def process_text(input_text):
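
As committed, the new transcribe() body calls model.transcribe(audio_file), but the function's parameter is named audio, so any call would raise a NameError on audio_file; that is one plausible cause of the Space's runtime-error status. A minimal corrected sketch of the function, assuming the same openai-whisper high-level API and keeping the "base" model chosen in this commit:

import whisper

# Load the model once at startup instead of reloading the weights on every call.
model = whisper.load_model("base")

def transcribe(audio):
    # model.transcribe() accepts a path to an audio file and returns a
    # dict whose "text" field holds the transcription.
    result = model.transcribe(audio)
    return result["text"]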
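
The commented-out block preserves the lower-level Whisper pipeline that the one-line transcribe() call wraps: load the audio and pad or trim it to a 30-second window, compute a log-Mel spectrogram on the model's device, detect the spoken language, then decode. A self-contained sketch of that path, where transcribe_lowlevel and audio_path are illustrative names rather than part of the commit, keeping the fp16=False decoding option from the comments:

import whisper

model = whisper.load_model("base")

def transcribe_lowlevel(audio_path):
    # Load audio and pad/trim it to fit the 30-second window Whisper decodes.
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)

    # Make a log-Mel spectrogram and move it to the same device as the model.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # Detect the spoken language from the spectrogram.
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")

    # Decode the audio; fp16=False keeps inference in float32, which is
    # required when running on CPU.
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
    return result.text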