# demo/tabs/openai.py
import gradio as gr
import openai


def generate_image(prompt, api_key):
    """Generate an image from a text prompt via the OpenAI Images API (legacy pre-1.0 SDK)."""
    # api_key = os.environ.get('OPENAI_API_KEY')
    if not api_key:
        raise gr.Error("Missing OPENAI_API_KEY")
    response = openai.Image.create(
        prompt=prompt,
        n=1,
        size="1024x1024",
        api_key=api_key,
    )
    # The response holds a list of generated images; return the URL of the first one.
    image_url = response['data'][0]['url']
    return image_url


def audio(file, api_key):
    """Transcribe an audio file with the Whisper API (legacy pre-1.0 SDK)."""
    if not api_key:
        raise gr.Error("Missing OPENAI_API_KEY")
    with open(file, 'rb') as f:
        transcript = openai.Audio.transcribe(model="whisper-1", file=f, api_key=api_key)
    return transcript['text']


def openai_tab():
    """Build the "openai" tab: text-to-image generation and audio transcription."""
    with gr.Tab("openai"):
        gr.Markdown('### Text-to-image generation')
        api_key_input = gr.Textbox(label='OPENAI_API_KEY', placeholder='Enter your OPENAI_API_KEY', type='password')
        with gr.Row():
            text_input = gr.Textbox(label='Your idea', placeholder='Enter your idea')
            image_output = gr.Image(label='Image')
        image_button = gr.Button('Generate image')
        image_button.click(generate_image, inputs=[text_input, api_key_input], outputs=image_output)

        gr.Markdown('### Audio transcription')
        with gr.Row():
            file_input = gr.Audio(label='Audio file', type='filepath')
            transcript_output = gr.Text()
        audio_button = gr.Button('Transcribe audio')
        audio_button.click(audio, inputs=[file_input, api_key_input], outputs=transcript_output)
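

# Minimal usage sketch (an assumption, not part of the original module): openai_tab()
# only builds the tab, so it must be called inside a gr.Blocks context before launching.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        openai_tab()
    demo.launch()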