import gradio as gr
import cv2
import base64
import numpy as np
import io
from pydub import AudioSegment
import os
from dashscope import MultiModalConversation
from dashscope.audio.tts_v2 import *
from dashscope import Generation
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
import requests
import random
import time
import json, re

from dataclasses import dataclass, field
from datetime import datetime, timedelta
import plotly.graph_objects as go
import plotly.figure_factory as ff

import importlib
import utils
from utils import *
importlib.reload(utils)

now = datetime.now()
import re, json
import requests
import json
import threading
import queue

@dataclass
class FocusState:
    """Mutable per-session state threaded through the focus-assistant pipeline."""
    describe: str = ""  # natural-language description of the user's observed state
    emotion: str = ""   # detected emotion label
    score: int = 0      # focus score; the prompt defines the scale as 1 (worst) to 5 (best)
    method_id: int = 0  # chosen intervention id (per the prompt: 1=music, 2=encouragement, 3=meditation)
    demand: str = ""    # not referenced anywhere in this chunk -- purpose unclear; TODO confirm
    dialog: list = field(default_factory=list)  # chat history as {"role": ..., "content": ...} dicts

@dataclass
class UserLog:
    """Loads the historical focus log and attaches a synthetic timestamp column.

    Reads ./state_log.csv, adds a "时间" column of 20 timestamps at 15-minute
    intervals, moves it to the front, and keeps only rows that have a focus
    score ("专注度得分").
    """

    df: pd.DataFrame = None  # the prepared focus-log table

    def __init__(self):
        # Synthetic timeline: 20 samples every 15 minutes from 2024-11-21 09:00.
        start_time = datetime(2024, 11, 21, 9, 0, 0)
        time_series = [start_time + timedelta(minutes=15 * i) for i in range(20)]

        # Assumes the CSV has exactly 20 rows to match time_series -- TODO confirm.
        df_log = pd.read_csv("./state_log.csv")

        df_log["时间"] = time_series

        # Move the timestamp column (position 7 after insertion) to the front.
        df_log = df_log.iloc[:, [7, 0, 1, 2, 3, 4, 5, 6]]

        # BUG FIX: the filter result was previously a bare expression whose
        # value was discarded; assign it so score-less rows are actually dropped.
        df_log = df_log[~df_log["专注度得分"].isna()]

        self.df = df_log



AliIP = "8.130.114.252"


def call_chat_completion(model, messages, max_tokens=2000):
    """Call the Qwen2-7B model deployed on Alibaba Cloud with the OPEA framework.

    Args:
        model: model identifier, e.g. "Qwen/Qwen2-7B-Instruct".
        messages: OpenAI-style chat messages (list of {"role", "content"} dicts).
        max_tokens: completion length cap.

    Returns:
        The decoded JSON response (OpenAI chat-completions schema).
    """
    url = "http://%s:8009/v1/chat/completions" % AliIP
    headers = {"Content-Type": "application/json"}
    data = {
        "model": model,
        "messages": messages,
        "stream": False,
        "max_tokens": max_tokens,
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    result = response.json()
    # Feed async consumers (see call_asyncly). result_queue is presumably
    # exported by `from utils import *` -- TODO confirm it is defined there.
    result_queue.put(result)
    # BUG FIX: previously nothing was returned, so synchronous callers
    # (generate_text_from_state_opea) received None and crashed when
    # subscripting response["choices"].
    return result

def call_asyncly(model, messages, max_tokens=2000):
    """Invoke call_chat_completion on a background thread (fire-and-forget).

    The HTTP result is delivered through result_queue by the worker thread.

    Args:
        model: model identifier passed through to call_chat_completion.
        messages: chat messages passed through unchanged.
        max_tokens: completion length cap, now forwarded to the worker
            (new optional parameter; default matches the worker's old value).

    Returns:
        The started threading.Thread so callers may join() it; previously
        None was returned, so this is backward compatible.
    """
    thread = threading.Thread(
        target=call_chat_completion,
        args=(model, messages, max_tokens),
    )
    # Daemonize so a hung HTTP request cannot keep the process alive at exit.
    thread.daemon = True
    thread.start()
    return thread


def get_method_prompt_opea():
    """Return the intervention-selection prompt template.

    The template contains three %s slots, filled (in order) with the user's
    state description, emotion, and focus score. The model is instructed to
    answer with a JSON object holding "method_id" and "content".
    """
    return """
    # 专注力提升伴侣

    ## 任务描述
    你是一个辅助专注力提升的工具，你需要根据用户当前的工作状态，提供相应的策略和措施，帮助用户提升当下的注意力或者通过劳逸结合，通过休息之后再提升工作效率

    ## 用户状态

    - 用户状态: %s
    - 用户情绪: %s
    - 专注力得分: %s，分为1到5分，1分是最低分，表示注意力严重不集中，5分最高分，表示注意力高度集中
    - 已持续工作：1小时30分钟
    - 剩余持续时间：30分钟

    ## 提升措施

    1. 音乐：针对走神、注意力不集中的客户可以播放音乐，通过打造氛围提升专注力，并提醒提醒用户不要分神（例如“主人你已经玩手机很久啦，再玩我要生气啦！”）。
    2. 鼓励：当用户注意力集中时，通过一句鼓励的话给用户加油打气（例如“主人加油哦，还有XX分钟就要完成这个任务了！”）
    3. 冥想：针对疲劳、焦虑的用户可以播放冥想视频，让用户跟着视频进行冥想，放松心情。

    ## 执行步骤

    1. 你需要根据"用户状态"考虑哪些措施能够帮助用户改善专注力。
    2. 你需要从"提升措施"里面挑选一个合适的举措帮助用户提升注意力。
    3. 作为一个语音交互助理，你需要生成一句话告诉用户你的计划，并且你需要扮演一个可爱的角色，使用可爱俏皮的语言风格。

    ## 输出格式
    你只需要输出Json文本，不需要输出其他内容，参考如下：
    {
    "method_id": 提升措施的ID，整数，参考"提升措施"的序号,
    "content": "需要跟用户交互的内容，50个字左右"
    }
    """

def generate_text_from_state_opea(state, input_text=None):
    """Drive one turn of the focus-assistant dialogue via the OPEA Qwen endpoint.

    With input_text, the user's message is appended to the ongoing dialog;
    otherwise a fresh conversation is seeded with the intervention-selection
    prompt built from the user's current describe/emotion/score.

    Args:
        state: FocusState carrying describe/emotion/score and the dialog list.
        input_text: optional free-form user message for follow-up turns.

    Returns:
        The same FocusState, with method_id and dialog updated.

    NOTE(review): call_chat_completion puts its result on result_queue and
    returns None, so subscripting `response` below raises TypeError -- confirm
    it is meant to `return response.json()`.
    """

    if input_text:
        # Follow-up turn: continue the existing conversation.
        # `messages` aliases state.dialog, so the append mutates it in place.
        messages = state.dialog
        messages.append({"role": "user", "content": input_text})
    else:
        # First turn: seed with a system role plus the filled-in method prompt.
        messages = [{"role": "system",
            "content": """你是一个辅助专注力提升的工具，你需要根据用户当前的工作状态，提供相应的策略和措施，帮助用户提升当下的注意力""",
        }]

        messages.append({"role": "user", "content": get_method_prompt_opea()%(state.describe, state.emotion,state.score)})


    response = call_chat_completion("Qwen/Qwen2-7B-Instruct", messages)
    print(response)  # debug: raw completion payload

    # Strip any markdown code fences the model wraps around its JSON answer.
    # method_json = json.loads(response["choices"][0]["message"]["content"])
    method_json = json.loads(re.sub(r"```json\n|```", "", response["choices"][0]["message"]["content"]))
    messages.append({"role": "assistant", "content": method_json["content"]})

    state.method_id = method_json["method_id"]
    state.dialog = messages

    return state

# Layout helpers for a compact, centered preview pane.
# NOTE(review): `css` is never passed to gr.Blocks(...) below -- currently unused; confirm intent.
css = """.my-group {max-width: 500px !important; max-height: 500px !important;}
         .my-column {display: flex !important; justify-content: center !important; align-items: center !important;}"""



def generate_bgm():
    """Yield the background-music MP3 as raw bytes (for a streaming gr.Audio).

    Yields:
        bytes: the full MP3 payload in a single chunk.
    """
    # BUG FIX: a hard-coded Colab path was assigned and then immediately
    # overwritten (dead store); only the effective path is kept.
    audio_path = "output.mp3"
    audio_segment = AudioSegment.from_file(audio_path, format="mp3")

    # Re-encode into an in-memory MP3 buffer and hand back the bytes.
    buffer = io.BytesIO()
    audio_segment.export(buffer, format="mp3")
    yield buffer.getvalue()

def generate_speech2(text="今天天气怎么样？"):
    """Synthesize `text` with DashScope CosyVoice and return the raw audio bytes.

    Args:
        text: the sentence to speak.

    Returns:
        The audio payload produced by SpeechSynthesizer.call.
    """
    synthesizer = SpeechSynthesizer(model="cosyvoice-v1", voice="longwan")
    return synthesizer.call(text)

def detect_video(state, video, chat_history):
    """Analyze an uploaded video and yield updates for the live-view tab.

    Pipeline: sample frames -> persist them -> vision model fills
    describe/emotion/score -> LLM picks an intervention and a reply ->
    the reply is spoken via TTS.

    Yields:
        (state, behavior text, TTS audio bytes, chat history, focus score, emotion)
    """
    # The incoming history is deliberately discarded: each upload starts fresh.
    chat_history = []

    # BUG FIX: the capture is now released (it previously leaked), and the
    # unused mp4 codec / fps locals were removed (no VideoWriter is created).
    cap = cv2.VideoCapture(video)
    try:
        frames = store_10_frames(cap)
        save_frames_to_disk(frames, "./")
    finally:
        cap.release()

    state = generate_text_from_video(state)       # vision -> describe/emotion/score
    state = generate_text_from_state_opea(state)  # LLM -> method_id + reply

    text = state.describe
    bot_message = state.dialog[-1]["content"]
    chat_history.append({"role": "assistant", "content": bot_message})
    yield state, text, generate_speech2(bot_message), chat_history, state.score, state.emotion


with gr.Blocks(theme=gr.themes.Glass()) as demo:

    # Per-session state: dialogue/analysis state and the historical focus log.
    state = gr.State(value=FocusState())
    userlog = gr.State(value=UserLog())

    # Page title banner.
    gr.HTML(
        """
        <h1 style='text-align: center;'> 专注伴侣 — 利用大模型重新定义注意力管理 </h1>
        """
    ) # <h3 style='text-align: center;'> Click the button to play audio </h3>

    # Tab 1: live view -- video analysis, focus metrics, BGM, and chat assistant.
    with gr.Tab("实时情况"):
        with gr.Row(equal_height=True):
            with gr.Column(scale=2):
                with gr.Row(equal_height=True):
                    with gr.Column(): # elem_classes=["my-column"]):
                        with gr.Group(): # elem_classes=["my-group"]):
                            # Upload target; detect_video is wired to this below.
                            video = gr.Video(autoplay=True, loop=True) # max_length=2) # sources="webcam")
                            # btn = gr.Button("start")

                    with gr.Column():
                        label1 = gr.Label(label="专注度得分")
                        label2 = gr.Label(label="状态分析")
                        # label3 = gr.Label("分析...", label="行为分析")


                with gr.Row(equal_height=True):

                    # Streaming audio sink for TTS replies / background music.
                    audio = gr.Audio(label="背景音乐", streaming=True, autoplay=True, loop=False)
                    label3 = gr.Textbox(label="行为分析",interactive=False)
                # NOTE(review): df_log is not defined at module scope in this file --
                # presumably exported by `from utils import *`; verify.
                gr.ScatterPlot(df_log[~df_log["专注度得分"].isna()], x="时间", y="专注度得分", color="专注度得分", y_lim=[0,6])



                # gr.Examples(
                #     examples=["/content/FocusBuddy/resources/istockphoto-1015426982-640_adpp_is.mp4"],
                #     inputs=[video],
                # )
            with gr.Column(scale=1):
                # Chat panel; the clear button also resets the metric labels.
                chatbot = gr.Chatbot(type="messages")
                msg = gr.Textbox()
                clear = gr.ClearButton([msg, chatbot, label1, label2, label3])
                def respond(state, input_text, chat_history):
                    """Handle a typed message: run the LLM turn, update the chat, speak the reply."""
                    state = generate_text_from_state(state, input_text)
                    reply = state.dialog[-1]["content"]
                    # Record both sides of the exchange in the visible history.
                    chat_history += [
                        {"role": "user", "content": input_text},
                        {"role": "assistant", "content": reply},
                    ]
                    # Clear the textbox ("") and stream the spoken reply.
                    yield state, "", chat_history, generate_speech(reply)

    # Tab 2: data detail -- raw log plus a next-period focus prediction.
    with gr.Tab("数据明细"):
        gr.HTML(
            """
            <h3 style='text-align: center;'> 根据用户的专注度数据，预测下个时间周期的 </h3>
            """
        )
        with gr.Row(equal_height=True):
            label4 = gr.Label(label="预计专注力分数")
            label5 = gr.Label(label="推荐工作项目")
            label6 = gr.Label(label="推荐辅助措施")
        btn = gr.Button("预测下个阶段",variant="primary")
        # NOTE(review): df_log is not defined at module scope in this file --
        # presumably exported by `from utils import *`; verify.
        gr.DataFrame(df_log[~df_log["专注度得分"].isna()])
        gr.ScatterPlot(df_log[~df_log["专注度得分"].isna()], x="时间", y="专注度得分", color="专注度得分", y_lim=[0,6])
    def make_predict(userlog):
        """Predict the next period's focus score from recent scores.

        Builds lag features (scores at t-1..t-3 plus the current score) and
        fits a random forest whose label is the next period's score.

        Args:
            userlog: UserLog whose .df holds the focus log.

        Returns:
            (predicted score, recommended task, recommended aid); the last two
            are currently static placeholders.
        """
        # BUG FIX: .copy() avoids pandas' SettingWithCopyWarning on the column
        # assignments below and guards userlog.df against accidental mutation.
        X = userlog.df[~userlog.df["专注度得分"].isna()].copy()
        X["feat1"] = X["专注度得分"].shift(1)      # score one period ago
        X["feat2"] = X["专注度得分"].shift(2)
        X["feat3"] = X["专注度得分"].shift(3)
        X["feat4"] = X["专注度得分"]               # current score
        X["target"] = X["专注度得分"].shift(-1)    # label: next period's score

        X = X.loc[:, ["feat1", "feat2", "feat3", "feat4", "target"]]
        # Skip the first 3 rows (NaN lags) and the last row (NaN label).
        x_train = X.iloc[3:-1, :-1]
        y_train = X.iloc[3:-1, -1]
        # The most recent row is the prediction input.
        x_test = X.iloc[-1:, :-1]

        model = RandomForestClassifier()
        model.fit(x_train, y_train)

        return model.predict(x_test)[0], "项目部署", "轻松的音乐"

    # Wire the prediction button to the three recommendation labels.
    btn.click(make_predict, inputs=userlog, outputs=[label4, label5, label6])

    # Tab 3: daily review -- summary plots and an advice chat.
    # NOTE(review): the generate_*_plot helpers are presumably exported by
    # `from utils import *`; verify.
    with gr.Tab("每日回顾"):
        with gr.Row(equal_height=True):
            with gr.Column(scale=2):
                gr.Plot(generate_gantt_plot(),label="今日专注度分布情况")
                with gr.Row(equal_height=True):
                    gr.Plot(generate_time_pie_plot(),label="今日专注时长")
                    gr.Plot(generate_pie_plot(),label="干扰因素占比")
            with gr.Column(scale=1):
                chatbot2 = gr.Chatbot(label="专注力提升建议",type="messages")
                # msg = gr.Textbox()
                btn_report = gr.Button("生成建议")
        def user(user_message, history: list):
            """Append the user's message to a copy of the chat history and clear the textbox."""
            updated = list(history)
            updated.append({"role": "user", "content": user_message})
            return "", updated

        def report(history: list):
            """Stream a daily-review suggestion into the chat, one character at a time.

            NOTE(review): `response` is not defined anywhere in this file, so
            clicking the button raises NameError -- it presumably should come
            from a Generation call; confirm the intended source.
            """
            bot_message = response.output.text # random.choice(["How are you?", "I love you", "I'm very hungry"])
            history.append({"role": "assistant", "content": ""})
            for character in bot_message:
                history[-1]['content'] += character
                time.sleep(0.05)  # typewriter effect
                yield history

        # Stream the generated advice into the review chat panel.
        btn_report.click(report, chatbot2, chatbot2)
        # clear.click(lambda: None, None, chatbot, queue=False)


    def fucn1(chat_history, state):
        """After the main reply, append a follow-up media message for the chosen method."""
        time.sleep(8)  # give the spoken reply time to finish first
        print(state.method_id, type(state.method_id))
        media = None
        if state.method_id == 1:
            # Method 1 (music): drop an auto-playing BGM clip into the chat.
            media = gr.Audio("./resources/bgm/piano-20s.mp3", autoplay=True)
        elif state.method_id == 3:
            # Method 3 (meditation): drop an auto-playing relaxation video.
            media = gr.Video("./resources/relax/r01.mp4", autoplay=True)
        if media is not None:
            chat_history.append({"role": "assistant", "content": media})
        return chat_history
    # Textbox submit: LLM turn -> (state, cleared box, chat, TTS audio), then
    # fucn1 appends any follow-up media message.
    # NOTE(review): this .then lists two outputs but fucn1 returns only
    # chat_history -- confirm whether audio should be an output here.
    msg.submit(respond, [state, msg, chatbot], [state, msg, chatbot, audio]).then(fn=fucn1, inputs=[chatbot,state], outputs=[chatbot, audio])
    # Video upload: full analysis pipeline, then the same follow-up media step.
    video.upload(
        fn=detect_video,
        inputs=[state, video, chatbot],
        outputs=[state, label3, audio,chatbot, label1, label2]
    ).then(fn=fucn1, inputs=[chatbot,state], outputs=[chatbot])# , audio])
    # btn.click(fn=detect_video, inputs=[video, chatbot], outputs=[text_desc, audio,chatbot])


# debug=True blocks and prints tracebacks; share=True opens a public tunnel.
demo.launch(debug=True, share=True)