Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -9,47 +9,11 @@ prompt = "Type and press Enter"
|
|
9 |
|
10 |
|
11 |
def record_text(audio_file, api_key):
    """Transcribe an audio file to text with OpenAI's Whisper model.

    Parameters
    ----------
    audio_file : str
        Path to the audio file to transcribe.
    api_key : str
        OpenAI API key used to authenticate the request.

    Returns
    -------
    str
        The transcribed text.
    """
    model_id = 'whisper-1'
    # Open in binary mode; 'with' guarantees the handle is closed even if
    # the API call raises (the original leaked the file handle).
    with open(audio_file, 'rb') as media_file:
        response = openai.Audio.transcribe(
            api_key=api_key,
            model=model_id,
            file=media_file,
        )
    # The transcription result exposes the text under the 'text' key.
    # The original `response.data['text']` would raise: the response
    # object has no `.data` attribute.
    return response['text']
|
|
|
9 |
|
10 |
|
11 |
def record_text(audio_file, api_key):
    """Transcribe an audio file to text with OpenAI's Whisper model.

    Parameters
    ----------
    audio_file : str
        Path to the audio file to transcribe.
    api_key : str
        OpenAI API key used to authenticate the request.

    Returns
    -------
    str
        The transcribed text.
    """
    openai.api_key = api_key
    # Use a distinct local name instead of rebinding the `audio_file`
    # parameter, and 'with' so the handle is closed even if the API call
    # raises (the original opened the file and never closed it).
    with open(audio_file, "rb") as media_file:
        transcript = openai.Audio.transcribe("whisper-1", media_file)
    return transcript['text']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
18 |
# sound = audio_file
|
19 |
# sound_type = sound.split(".")
|