import gradio as gr
import subprocess
import os
from datetime import datetime
from scipy.io.wavfile import write
import numpy as np

import sys
sys.path.append(".")
#from frcnn_src.fastercnn import faster_rcnn_detection 
from whisper_src.sst_test import call_whisper

def create_dir(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of an ``os.path.exists`` guard, which
    avoided nothing and could raise ``FileExistsError`` if two callers
    raced between the check and the ``makedirs`` call.
    """
    os.makedirs(dir_path, exist_ok=True)

def datatime_str() -> str:
    """Return the current local time as ``YYYY-MM-DD_HH-MM-SS``.

    The format contains no spaces or colons, so the result is safe to
    embed directly in file names.
    """
    now = datetime.now()
    return now.strftime("%Y-%m-%d_%H-%M-%S")

class LLMGradio:
    """Wrapper around a chat binary driven interactively over stdin/stdout.

    The child process is started once in ``__init__`` and kept alive for
    the lifetime of this object; each :meth:`llm_fn` call writes one
    prompt line and reads the reply line back.
    """

    def __init__(self, cmd: list) -> None:
        # Launch the chat binary with all three streams piped so prompts
        # can be fed and completions read interactively.
        self.p = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # Discard startup/banner output before the binary is ready for
        # input.  NOTE(review): assumes exactly three lines of preamble —
        # confirm against the chat binary's actual output.
        self.p.stdout.readline()
        self.p.stdout.readline()
        self.p.stdout.readline()


    def llm_fn(self, text: str):
        """Send *text* to the child process and return its reply line.

        Blocks until the child writes a full line.  A second line is read
        and dropped afterwards to keep the stream aligned for the next
        call (the binary appears to emit a trailing blank line per reply).
        """
        print("llm receive input: ", text)
        self.p.stdin.write(bytes(text + "\n", encoding="utf8"))
        self.p.stdin.flush()
        x = self.p.stdout.readline()
        print("read empty line")
        self.p.stdout.readline()
        print("finish empty line")
        return str(x, encoding="utf8")

class WhisperGradio:
    """Gradio adapter that saves recorded audio to disk and transcribes it."""

    def __init__(self) -> None:
        # Scratch directory for intermediate audio files.
        self.tempfile_path = "./temp/krai/Whisper"
        create_dir(self.tempfile_path)

    def whisper_fn(self, audio):
        """Transcribe a ``gr.Audio`` value.

        Parameters
        ----------
        audio : tuple | None
            ``(sample_rate, samples)`` pair from the Gradio audio widget,
            or ``None`` when the user submitted without recording.

        Returns
        -------
        The Whisper transcription, or ``None`` when no audio was given.
        """
        # `is None` — the original `audio == None` is unidiomatic identity test.
        if audio is None:
            return None
        # scipy.io.wavfile.write produces WAV data, so use a .wav suffix;
        # the original named the file .mp3, which mislabels the content.
        filename = datatime_str() + ".wav"
        file_path = os.path.abspath(os.path.join(self.tempfile_path, filename))
        sample_rate, audio_array = audio
        # Whisper expects 16-bit PCM samples; cast before writing.
        write(file_path, sample_rate, audio_array.astype(np.int16))
        return self.call_model(file_path)

    def call_model(self, file_path):
        """Delegate to the project-local Whisper entry point."""
        return call_whisper(file_path)

from PIL import Image

'''

class FRCNNGradio:
    def __init__(self) -> None:
        self.tempfile_path = "./temp/krai/FastCNN"
        create_dir(self.tempfile_path)

    def FRCNN_fn(self, image):
        if image.any() == None:
            return None
        pil_image = Image.fromarray(image)
        filename = datatime_str() + ".jpg"
        file_path = os.path.join(self.tempfile_path, filename)
        file_path = os.path.abspath(file_path)
        pil_image.save(file_path)
        target_file_path = os.path.join(self.tempfile_path, "result_" + filename)
        target_file_path = os.path.abspath(target_file_path)
        return self.call_model(file_path, target_file_path)
    
    def call_model(self, file_path, target_file_path):
        print("frcnn processing image file: ", file_path, ", and target file is writing to ", target_file_path)
        return faster_rcnn_detection(file_path, target_file_path)

'''

def call_yolov7(image):
    """Run the external yolov7 binary on *image*.

    Parameters
    ----------
    image : numpy.ndarray | None
        RGB array from ``gr.Image``, or ``None`` when nothing was uploaded.

    Returns
    -------
    str | None
        Path of the annotated output image, or ``None`` for empty input.
    """
    # Fix: the original `image.any() == None` raised AttributeError when
    # image was None and was always False otherwise (any() returns a bool).
    if image is None:
        return None
    out_dir = "./temp/krai/yolov7/"
    # Ensure the scratch directory exists before saving (the original
    # crashed if it had not been created out of band).
    create_dir(out_dir)
    pil_image = Image.fromarray(image)
    filename = datatime_str() + ".jpg"
    file_path = os.path.join(out_dir, filename)
    pil_image.save(file_path)
    # Argument-list invocation (shell=False): no shell-injection surface.
    p = subprocess.run([r"./yolov7", file_path])
    print(p)
    # NOTE(review): assumes the binary always writes ./image.png — confirm.
    return "./image.png"

def main():
    """Build and launch the Gradio UI: LLM chat, Whisper, and yolov7 tabs."""
    # Long-lived baichuan chat process; binary/model paths are
    # deployment-specific absolute paths.
    llmg = LLMGradio([r"/root/build/chat", "-m", "/root/build/../models/baichuan-q4.bin", "-t", "128", "--type", "baichuan", "--mmap"])
    whisperg = WhisperGradio()
    # frcnng = FRCNNGradio()

    with gr.Blocks() as iface:
        with gr.Column():
            # Static logo header shown above all tabs.
            gr.Image("./logo.jpg", height=126, width=127, show_download_button=False, show_share_button=False, show_label=False)
            gr.Markdown("*Choose Model.*")
        with gr.Tab("LLM"):
            with gr.Column():
                chatbot = gr.Chatbot()
                llm_input = gr.Textbox(label="talk with me!")
                llm_button = gr.Button("clear")

        def user(user_message, history):
            # Stage 1: append the user's message (reply pending) to the
            # chat history and clear the input textbox.
            return "", history + [[user_message, None]]

        def bot(history):
            # Stage 2: run the LLM on the newest user message and fill in
            # the pending reply slot.
            user_input = history[-1][0]
            bot_message = llmg.llm_fn(user_input)
            history[-1][1] = bot_message
            return history

        # Submit pipeline: user() updates the UI immediately, then bot()
        # adds the model's reply.
        llm_input.submit(user, [llm_input, chatbot], [llm_input, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        # The "clear" button resets the chatbot component to empty.
        llm_button.click(lambda: None, None, chatbot, queue=False)

        with gr.Tab("Whisper"):
            with gr.Row():
                whisper_input = gr.Audio(label="talk with me!")
                whisper_output = gr.Textbox(label="whisper response")
            whisper_buttom = gr.Button("submit")

        whisper_buttom.click(whisperg.whisper_fn, inputs=whisper_input, outputs=whisper_output)

        with gr.Tab("yolov7"):
            with gr.Row():
                frc_input = gr.Image(label = "input")
                frc_output = gr.Image(label = "response")
            frc_buttom = gr.Button("submit")
        frc_buttom.click(call_yolov7, inputs=frc_input, outputs=frc_output)

    # Serve the UI on port 8080.
    iface.launch(server_port=8080)


if __name__ == "__main__":
    # Launch only when executed as a script, not on import.
    main()

