import gradio as gr
import time
from langchain_core.prompts import PromptTemplate
from vllm import LLM, SamplingParams

from common import *


# Load the LLM once at import time; MODEL_NAME is provided by `common`.
llm = LLM(model=MODEL_NAME)
# NOTE(review): this module-level value appears dead — summary_action assigns
# a same-named local (no `global` statement), so nothing ever reads or
# updates this; consider removing after confirming no external importers.
summary_text = ''
# Generation cap shared by both the summary and Q&A calls below.
sampling_params = SamplingParams(max_tokens=256)


def read_action(url='', file_path=None):
    """Extract text from an uploaded file or an online (Bilibili) video URL.

    A local file takes priority over the URL. Audio/video inputs are
    transcribed via the helpers from ``common``; documents are read directly.

    Args:
        url: Online video URL (currently Bilibili); used only when no file
            was uploaded.
        file_path: Path of the uploaded local file, or None.

    Returns:
        Tuple of (scratch folder name, extracted text). The folder name is
        always returned, even when neither input was provided.
    """
    # Timestamp-based scratch directory for downloaded / converted media.
    work_dir = f'temp_{time.time()}'
    extracted = ''

    if file_path:
        create_folder(work_dir)
        suffix = os.path.splitext(file_path)[1]
        if check_file_type(file_path, 'audio'):
            extracted = audio2text(work_dir, file_path)
        elif check_file_type(file_path, 'video'):
            # Strip the audio track first, then transcribe it.
            video2audio(work_dir, file_path)
            extracted = audio2text(work_dir)
        elif suffix in ('.doc', '.docx'):
            extracted = read_docx(file_path)
        elif suffix == '.pdf':
            extracted = read_pdf(file_path)
        else:
            # Fallback: treat anything else as a UTF-8 text file.
            with open(file_path, 'r', encoding='utf-8') as fh:
                extracted = fh.read()
    elif url:
        create_folder(work_dir)
        if download_bilibili_audio(work_dir, url):
            extracted = audio2text(work_dir)
        else:
            extracted = '在线视频读取失败'

    return work_dir, extracted


def summary_action(text):
    """Summarize *text* with the shared LLM and return it twice.

    Args:
        text: The raw text previously read from a file or URL.

    Returns:
        Tuple of (new ask-history seed, summary text) — both the same
        markdown outline string, feeding the Q&A state and the display box.
    """
    template = PromptTemplate(
        input_variables=["text"],
        template=(
            "You are a helpful assistant for summary text.\n"
            "Outline the text below, output in markdown format:\n"
            "```\n{text}\n```"
        ),
    )
    generations = llm.generate([template.format(text=text)], sampling_params)
    summary = generations[0].outputs[0].text.strip()
    # The summary seeds the follow-up Q&A history as well as the output box.
    return summary, summary


def ask_action(previous_output, context, question):
    """Answer a follow-up question about *context* and append it to the log.

    Args:
        previous_output: Accumulated Q&A transcript shown so far.
        context: The source text the question is about.
        question: The user's follow-up question.

    Returns:
        Tuple of (updated history state, updated display text, '') — the
        empty string clears the question input box.
    """
    qa_template = PromptTemplate(
        input_variables=["context", "question"],
        template=(
            "Context: {context}\n"
            "Question: {question}\n"
            "Answer:"
        ),
    )
    prompt = qa_template.format(context=context, question=question)
    generations = llm.generate([prompt], sampling_params)
    raw = generations[0].outputs[0].text
    # Keep only the text after a possibly echoed "Answer:" marker.
    answer = raw.split("Answer:")[-1].strip()
    entry = f"[Question] {question}\n[Answer] \n{answer}"
    updated = f"{previous_output}\n{TEXT_SPLITTER}{entry}"
    return updated, updated, ''


if __name__ == '__main__':
    print_system_info()

    # Build the two-column UI: left = input & read controls,
    # right = summary display and follow-up Q&A.
    with gr.Blocks() as demo:
        gr.Markdown(f"# {TOOL_NAME}")
        with gr.Row():
            with gr.Column():
                url = gr.Textbox(label="输入在线视频 URL（当前支持 B站）")
                upload_file = gr.File(label="或者上传本地文件（支持常见格式的文档、音频、视频）")
                read_button = gr.Button("读取")
                read_output = gr.Textbox(label="读取内容", lines=12, placeholder="")
                summary_button = gr.Button("总结")
            with gr.Column():
                summary_output = gr.Textbox(label="总结", lines=30, placeholder="")
                ask_input = gr.Textbox(label="继续向 AI 询问", lines=1, placeholder="")
                ask_button = gr.Button("询问")

        # Uploading a local file clears the URL box so only one source is active.
        upload_file.upload(clear_url_textbox, inputs=upload_file, outputs=url)

        # Cross-call state. (Removed an unused `state = gr.State(value=False)`
        # that was never wired to any event handler.)
        temp_folder = gr.State(value="")   # scratch folder from read_action
        ask_history = gr.State(value="")   # accumulated Q&A transcript
        read_button.click(
            read_action,
            inputs=[url, upload_file],
            outputs=[temp_folder, read_output]
        )
        summary_button.click(
            summary_action,
            inputs=read_output,
            outputs=[ask_history, summary_output]
        )
        ask_button.click(
            ask_action,
            inputs=[ask_history, read_output, ask_input],
            outputs=[ask_history, summary_output, ask_input]
        )

    # Bind to all interfaces and create a public share link.
    demo.launch(server_name='0.0.0.0', share=True)
