
# Demo 3: transcribe microphone audio with the OpenAI Whisper API via a Gradio UI

import os

import gradio as gr
import openai

# SECURITY: the API key was previously hardcoded here — it is exposed in version
# control and must be considered compromised; rotate it. Prefer supplying the
# key via the OPENAI_API_KEY environment variable. The hardcoded value is kept
# only as a fallback for backward compatibility.
openai.api_key = os.getenv(
    "OPENAI_API_KEY",
    "sk-fbN36d8rqnS5c7CK303f24723a1f4751B0E794318745Ef6a",
)
# Custom API endpoint (proxy); override with OPENAI_BASE_URL if set.
openai.base_url = os.getenv("OPENAI_BASE_URL", "https://ai-yyds.com/v1/")

def transcribe(audio):
    """Transcribe an audio file to text using the OpenAI Whisper API.

    Args:
        audio: Filesystem path to the recorded audio file (Gradio passes a
            filepath because the input component uses type="filepath").

    Returns:
        The transcribed text returned by the Whisper "whisper-1" model.
    """
    # Use a context manager so the file handle is closed even if the
    # API call raises (the original code leaked the open handle).
    with open(audio, "rb") as audio_file:
        transcript = openai.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
    print(transcript)
    return transcript.text

# BUG FIX: previously `.launch()` was chained onto the Interface constructor,
# so `ui` was bound to launch()'s return value (not the Interface), and the
# trailing `ui.launch()` then failed / double-launched. Bind the Interface
# first and launch exactly once.
ui = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

if __name__ == "__main__":
    ui.launch()