import gradio as gr
import openai, subprocess
import subprocess
from langchain_openai import ChatOpenAI
from pathlib import Path
import pygame
from utils.get_weather import *
import os

# --- OpenAI client configuration ---
# SECURITY: an API key was previously hard-coded on this line. A secret
# committed to source control is compromised and must be rotated; read it
# from the environment instead (same variables the LangChain block uses).
openai.api_key = os.getenv("OPENAI_KEY", "")
openai.base_url = os.getenv("OPENAI_API_BASE", "https://ai-yyds.com/v1/")
openai.timeout = 120  # seconds; Whisper uploads over the proxy can be slow

# Conversation history, seeded with the routing system prompt (Chinese).
# The prompt tells the model to answer normally, but to emit the sentinel
# "GETWEATHER" for weather questions and "GETDAY" for date questions so the
# code below can run the matching tool.
messages = [{"role": "system", "content": '背景:你是一名知识渊博，乐于助人的智能聊天机器人.你的任务是陪我聊天，请用简短的对话方式，用中文讲一段话，每次回答不超过50个字！'
                                          '工作流:'
                                          '1. 如果是询问查询天气，查询和体感相关的情况下，直接返回"GETWEATHER",不然的话将会收到惩罚'
                                          '2. 如果是询问查询日期的话，查询时辰相关的情况下，直接返回"GETDAY",不然的话将会收到惩罚'}]

# Plain chat persona (no tool routing) swapped in for the second LLM pass
# after a tool result has been gathered.
special_messages = {"role": "system", "content": '背景:你是一名知识渊博，乐于助人的智能聊天机器人.你的任务是陪我聊天，请用简短的对话方式，用中文讲一段话，每次回答不超过50个字！'}

# --- LangChain adapter ---
# Mirror the OpenAI credentials so the LangChain client talks to the same
# proxy endpoint as the raw `openai` module above.
api_base = os.getenv("OPENAI_API_BASE")
api_key = os.getenv("OPENAI_KEY")

# Deterministic (temperature=0) chat model used for every completion call.
_llm_kwargs = dict(
    model="gpt-3.5-turbo",
    temperature=0,
    api_key=api_key,
    base_url=api_base,
)
llm = ChatOpenAI(**_llm_kwargs)

def play_mp3(file_path):
    """Start playback of an MP3 file through pygame's music mixer.

    Playback is asynchronous: the call returns immediately while audio
    continues in the background.
    """
    mixer = pygame.mixer
    mixer.init()
    mixer.music.load(file_path)
    mixer.music.play()

def _rephrase_tool_result(tool_text, user_text):
    """Second LLM pass: record *tool_text* as an AI turn, re-ask *user_text*,
    swap the routing system prompt for the plain chat persona, and return the
    model's natural-language answer."""
    messages.append({"role": "ai", "content": tool_text})
    messages.append({"role": "user", "content": user_text})
    messages.pop(0)                       # drop the tool-routing system prompt
    messages.insert(0, special_messages)  # install the plain chat persona
    response = llm.invoke(messages)
    print(response)
    return response.content


def transcribe(audio):
    """Handle one voice turn: speech-to-text, chat completion, text-to-speech.

    Args:
        audio: filesystem path to the recorded audio clip (Gradio supplies
            this because the Audio input uses type="filepath").

    Returns:
        The accumulated non-system chat transcript as a display string.
    """
    global messages

    # Whisper speech-to-text. `with` guarantees the handle is closed
    # (the original version leaked the open file object).
    with open(audio, "rb") as audio_file:
        transcript = openai.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
        )
    print(transcript)
    messages.append({"role": "user", "content": transcript.text})

    # First pass through the LLM (LangChain adapter).
    response = llm.invoke(messages)
    print(response)

    _ai_role = 'ai'
    _tmp_message = response.content

    if _tmp_message == "GETWEATHER":
        # Weather tool: fetch live data, then let the LLM phrase it.
        temperature = WeatherTools.get_temperature_from_bing("上海")
        _tmp_message = _rephrase_tool_result(
            f"今天上海天气{temperature}", transcript.text
        )
    elif _tmp_message == "GETDAY":
        # The system prompt promises a "GETDAY" sentinel for date questions,
        # but the original code never handled it; mirror the weather flow.
        from datetime import datetime
        _tmp_message = _rephrase_tool_result(
            f"今天是{datetime.now():%Y-%m-%d %H:%M}", transcript.text
        )

    messages.append({"role": _ai_role, "content": _tmp_message})

    # Text-to-speech via the external `wsay` command (Windows). The earlier
    # openai.audio.speech approach was removed because it frequently timed out.
    subprocess.call(["wsay", _tmp_message])

    print(messages)

    # Build the visible transcript, skipping system prompts.
    chat_transcript = "".join(
        f"{m['role']}: {m['content']}\n\n"
        for m in messages
        if m['role'] != 'system'
    )
    return chat_transcript

# Build the UI, then launch exactly once. The original chained `.launch()`
# onto the constructor AND called `ui.launch()` again on its return value,
# which is not an Interface — a second, broken launch.
ui = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs="text",
)
ui.launch()