"""Voice chat assistant: transcribe microphone audio with Whisper and reply via GPT."""
import os
import subprocess

import gradio as gr

import openai  # kept for compatibility; the code below uses the OpenAI client class
from openai import OpenAI

# SECURITY FIX: never hard-code API keys in source. The key is read from the
# OPENAI_API_KEY environment variable; fail fast with a clear error if unset.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

# Conversation history, seeded with the system prompt (Chinese: "You are a
# knowledgeable, helpful chatbot; chat with me in short Chinese replies of
# at most 50 characters per answer").
messages = [{"role": "system", "content": '你是一名知识渊博，乐于助人的智能聊天机器人.你的任务是陪我聊天，请用简短的对话方式，用中文讲一段话，每次回答不超过50个字！'}]

def transcribe(audio):
    """Transcribe one recorded clip, get a chat reply, and speak it aloud.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded audio file (Gradio ``type='filepath'``).

    Returns
    -------
    str
        The assistant's reply text; also spoken via the ``wsay`` TTS command
        as a side effect, and appended (with the user turn) to the
        module-level ``messages`` history.
    """
    # Speech -> text via Whisper. `with` guarantees the handle is closed
    # (the original opened the file and never closed it).
    with open(audio, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )

    messages.append({"role": "user", "content": transcript.text})

    # Send the full history so the model keeps conversational context.
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )

    reply = completion.choices[0].message
    # Store a plain dict rather than the SDK message object so the history
    # stays uniform and JSON-serializable.
    messages.append({"role": reply.role, "content": reply.content})

    # Speak the reply with the Windows `wsay` CLI; argument-list form avoids
    # shell-quoting issues with arbitrary model output.
    subprocess.call(["wsay", reply.content])

    return reply.content

# BUG FIX: the original assigned `ui = gr.Interface(...).launch()` and then
# called `ui.launch()` again — but Interface.launch() does not return the
# Interface, so the second call failed at runtime. Build the Interface once,
# then launch it once under the script-entry guard.
ui = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)

if __name__ == "__main__":
    ui.launch()