import gradio as gr
import whisper
import time

import paho.mqtt.publish as publish
import paho.mqtt.subscribe as subscribe
import json

import os
import time
# Cache ModelScope model downloads under ./check; must be set before funasr is imported.
os.environ ['MODELSCOPE_CACHE'] = './check'
from funasr import AutoModel
# Chinese ASR pipeline: Paraformer recognizer + FSMN voice-activity detection
# + CT punctuation restoration, loaded onto GPU 0.
model = AutoModel(model="paraformer-zh",  vad_model="fsmn-vad",  punc_model="ct-punc",device='cuda:0')


def send_msg(message, hostname="120.77.8.146"):
    """Publish *message* to the ``llm/ask`` MQTT topic.

    Args:
        message: Payload to publish (typically a JSON-encoded string).
        hostname: MQTT broker host. Defaults to the project broker so
            existing callers are unaffected.
    """
    publish.single("llm/ask", message, hostname=hostname)

def recive_message(hostname="120.77.8.146"):
    """Block until one message arrives on the ``llm/answer`` MQTT topic.

    Args:
        hostname: MQTT broker host. Defaults to the project broker so
            existing callers are unaffected.

    Returns:
        tuple: ``(message, history)`` decoded from the JSON payload.

    Raises:
        json.JSONDecodeError: If the payload is not valid JSON.
        KeyError: If the payload lacks ``message`` or ``history`` keys.
    """
    msg = subscribe.simple("llm/answer", hostname=hostname)
    msg_json = json.loads(msg.payload)
    return msg_json['message'],  msg_json['history']
def speech_to_text(ad):
    """Transcribe the audio file at path *ad*, ask the remote LLM to extract
    flight parameters (altitude, obstacle-avoidance flag, spray width) from
    the transcript, and return the LLM's reply text."""
    print(ad)
    # Run ASR with domain hotwords to bias recognition toward the key terms.
    recognition = model.generate(input=ad, batch_size_s=300, hotword='避障,喷幅')
    print(recognition)
    transcript = recognition[0]["text"]
    # Extraction prompt: instructs the LLM to emit the three fields as JSON,
    # then appends the transcript to be analyzed.
    prompt = (
        '你是一个标准的信息提取器，识别文字的意图，其中飞行高度是数值类型，并且以米为单位， 是否打开避障为布尔类型，喷幅是数值类型，并且以米为单位'
        '并且将他们以json的格式输出，'
        '格式是这样的：飞行高度：\n 是否打开避障：\n 喷幅：\n 请抽取下面文本的信息：'
    ) + transcript
    print(prompt)
    # Fresh conversation each request: empty history.
    send_msg(json.dumps({'message': prompt, 'history': []}))
    bot_message, history = recive_message()
    return bot_message

def _make_transcriber(audio_source):
    """Build a Gradio Interface that feeds speech_to_text from the given audio source."""
    return gr.Interface(
        fn=speech_to_text,
        inputs=gr.Audio(sources=audio_source, type="filepath"),
        outputs=gr.Textbox(),
    )

mic_transcribe = _make_transcriber("microphone")
file_transcribe = _make_transcriber("upload")

demo = gr.Blocks()
with demo:
    # Expose both input modes as tabs of a single app.
    gr.TabbedInterface(
        [mic_transcribe, file_transcribe],
        ["Transcribe Microphone", "Transcribe Audio File"],
    )

# Bind to all interfaces so the UI is reachable from other machines.
demo.launch(server_name='0.0.0.0')