import argparse
import torch
import gradio as gr
from moondream import detect_device, LATEST_REVISION
from threading import Thread
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
from PIL import ImageDraw, Image
import re
from torchvision.transforms.v2 import Resize
import cv2
import time
import queue
import logging
from moviepy.editor import ImageSequenceClip
import random
from ultralytics import YOLO

# Open-vocabulary YOLO-World detector, used later to draw person boxes
# on frames the VLM flags as interesting.
yolo_model = YOLO("yolov8s-world.pt")
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Frame indices flagged by process_frame() and consumed by
# update_processed_video(); reset via clear_key_frame_list().
key_frame_list = []

parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true")
args = parser.parse_args()



# Device selection: forced CPU via --cpu, otherwise whatever moondream's
# detect_device() recommends (which also chooses the matching dtype).
if args.cpu:
    device = torch.device("cpu")
    dtype = torch.float32
else:
    device, dtype = detect_device()
    if device != torch.device("cpu"):
        logger.info("Using device:{}".format(device))
        logger.info("If you run into issues, pass the `--cpu` flag to this script.")

# Load the moondream2 vision-language model and its tokenizer once at startup.
model_id = "vikhyatk/moondream2"
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=LATEST_REVISION)
moondream = AutoModelForCausalLM.from_pretrained(
    model_id, trust_remote_code=True, revision=LATEST_REVISION, torch_dtype=dtype
).to(device=device)
moondream.eval()

# Check whether the image shows anyone without a hard hat or safety harness, or climbing over a guardrail.

def answer_question(img, prompt):
    """Stream moondream's answer to *prompt* about image *img*.

    Generation runs in a background thread; this generator yields the
    accumulated answer text after each new token so the Gradio Markdown
    output can render it incrementally.
    """
    embeds = moondream.encode_image(img)
    streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)
    worker = Thread(
        target=moondream.answer_question,
        kwargs=dict(
            image_embeds=embeds,
            question=prompt,
            tokenizer=tokenizer,
            streamer=streamer,
        ),
    )
    worker.start()

    so_far = ""
    for chunk in streamer:
        so_far = so_far + chunk
        yield so_far

# Matches a bracketed list of four numbers, e.g. "[0.1, 0.2, 0.3, 0.4]".
# Each coordinate may be negative and may omit the fractional part, so
# integer outputs such as "[0, 0.5, 1, 1]" are also accepted (the old
# pattern required a decimal point and silently missed those).
# Compiled once at module level instead of on every call.
_BBOX_PATTERN = re.compile(
    r"\[\s*(-?\d+(?:\.\d+)?)\s*,\s*(-?\d+(?:\.\d+)?)\s*"
    r",\s*(-?\d+(?:\.\d+)?)\s*,\s*(-?\d+(?:\.\d+)?)\s*\]"
)


def extract_floats(text):
    """Return the four bbox coordinates found in *text* as floats, or None.

    The model is expected to emit normalized coordinates in a bracketed,
    comma-separated list; only the first such list in *text* is used.
    """
    match = _BBOX_PATTERN.search(text)
    if match:
        return [float(num) for num in match.groups()]
    return None


def extract_bbox(text):
    """Return the bbox found in *text* as an (x1, y1, x2, y2) tuple, or None."""
    coords = extract_floats(text)  # parse once (the old code ran the regex twice)
    if coords is None:
        return None
    x1, y1, x2, y2 = coords
    return (x1, y1, x2, y2)


def process_answer(img, answer):
    """If *answer* contains a normalized bbox, draw it on *img* and show it.

    Coordinates from the model are assumed to be in [0, 1]; they are scaled
    to the resized image's pixel size before drawing.  Returns a gr.update()
    that either reveals the annotated image or hides the annotation widget.
    """
    bbox = extract_bbox(answer)  # parse once (the old code ran extraction twice)
    if bbox is None:
        return gr.update(visible=False, value=None)

    x1, y1, x2, y2 = bbox
    draw_image = Resize(768)(img)
    width, height = draw_image.size
    pixel_box = (int(x1 * width), int(y1 * height), int(x2 * width), int(y2 * height))
    ImageDraw.Draw(draw_image).rectangle(pixel_box, outline="red", width=3)
    return gr.update(visible=True, value=draw_image)


# for video
def process_frame(video_path, prompt, answer_queue):
    global key_frame_list
    video = cv2.VideoCapture(video_path)
    # frame_list = []
    fps = video.get(cv2.CAP_PROP_FPS)  # Get the frame rate of the video
    frame_interval = int(fps)  # Interval to process one frame per second
    total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT) 

    success, frame = video.read()
    # if frame is not None:
    #     if len(frame.shape) == 3 and frame.shape[2] == 3:  # If frame is in color
    #         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    #     else:
    #         frame_bgr = frame
    frame_count = 0
    answers = []

    while success:
        try:
            if frame_count % frame_interval == 0:  # Process one frame per second
                timestamp = frame_count / fps  # Calculate timestamp in seconds
                minutes, seconds = divmod(timestamp, 60)
                timestamp_str = f"{int(minutes):02d}:{int(seconds):02d}"

                logger.info("Processing frame {} at timestamp {}".format(frame_count, timestamp_str))


                image = Image.fromarray(frame)
                frame = image.convert('RGB')
                
                enc_image = moondream.encode_image(frame).to(device)
                answer = moondream.answer_question(enc_image, prompt, tokenizer)
                if "Yes" in answer:
                    # frame_bgr = yolo_annotate(frame_bgr)
                    expansion = [e for e in range(max(0,frame_count-frame_interval//2+1),min(frame_count+frame_interval//2-1,int(total_frames)))]
                    key_frame_list.extend(expansion)
                
                if isinstance(answer, torch.Tensor):
                    answer = answer.cpu().tolist()
                pure_answer = answer.replace('\n\n', ' ')
                answers.append(f"🌔 时间: {timestamp_str} (第 {frame_count} 帧):\n\n > {pure_answer}")
                
                answer_queue.put("\n\n".join(answers))

            # frame_list.append(frame_bgr)
            success, frame = video.read()
            frame_count += 1
            # if frame is not None:
            #     if len(frame.shape) == 3 and frame.shape[2] == 3:  # If frame is in color
            #         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            #     else:
            #         frame_bgr = frame

        except EOFError:
            break
    # process_video_and_display(frame_list, fps)
    answer_queue.put(None)  # Signal completion
    


def generate_responses_upload(video_path, prompt):
    """Kick off frame processing in a background thread and yield each
    cumulative answer string as it becomes available.

    The worker signals completion by putting ``None`` on the queue, which
    terminates the sentinel iterator below.
    """
    clear_key_frame_list()
    results = queue.Queue()
    worker = Thread(target=process_frame, args=(video_path, prompt, results))
    worker.start()

    # iter() with a sentinel stops as soon as the worker posts None.
    for answer in iter(results.get, None):
        yield answer


def process_answer2(video, answer):
    """Video analogue of process_answer: draw the bbox parsed from *answer*.

    NOTE(review): *video* comes from a gr.Video component (typically a file
    path), but Resize()/ImageDraw expect a PIL image — per-frame bbox
    handling is still a TODO, so confirm what this actually receives before
    relying on the drawing path.
    """
    bbox = extract_bbox(answer)  # parse once (the old code ran extraction twice)
    if bbox is None:
        return gr.update(visible=False, value=None)

    x1, y1, x2, y2 = bbox
    # TODO: processing each frame bbox
    draw_image = Resize(768)(video)
    width, height = draw_image.size
    pixel_box = (int(x1 * width), int(y1 * height), int(x2 * width), int(y2 * height))
    ImageDraw.Draw(draw_image).rectangle(pixel_box, outline="red", width=3)
    return gr.update(visible=True, value=draw_image)


def yolo_annotate(img):
    """Detect people in *img* with YOLO-World and return the frame with the
    predicted boxes rendered onto it (a BGR numpy array, per plot())."""
    # Restrict the open-vocabulary detector to a single class.
    yolo_model.set_classes(['person'])
    predictions = yolo_model.predict(img, conf=0.05, iou=0.45)
    # plot() draws the detections onto a copy of the input frame.
    return predictions[0].plot()

def write_frames_to_video(frame_list, output_file, fps=30):
    """Encode *frame_list* (a sequence of numpy frames) to *output_file*
    using the H.264 codec at the given frame rate."""
    clip = ImageSequenceClip(frame_list, fps=fps)
    clip.write_videofile(output_file, codec='libx264')


def process_video_and_display(frames, fps):
    """Write *frames* to a fixed output file and return its path."""
    destination = "output_video.mp4"
    write_frames_to_video(frames, destination, fps)
    return destination

def update_processed_video(uploaded_video):
    """Re-encode *uploaded_video*, running YOLO person annotation on every
    frame whose index appears in the global ``key_frame_list``.

    Returns the path of the annotated output video.
    """
    cap = cv2.VideoCapture(uploaded_video)
    fps = cap.get(cv2.CAP_PROP_FPS)  # preserve the source frame rate
    frames = []
    frame_count = 0
    # Hoist membership testing into a set: key_frame_list can hold thousands
    # of indices, and a per-frame list scan made this loop O(frames * keys).
    key_frames = set(key_frame_list)
    logger.info("expended key_frame_list, {}".format(key_frame_list))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame is None:
            logger.warning("Warning: Skipping frame. Cannot load ")
            continue
        # cv2 decodes BGR; this swap hands moviepy RGB data.  (COLOR_RGB2BGR
        # and COLOR_BGR2RGB perform the same channel swap.)
        if len(frame.shape) == 3 and frame.shape[2] == 3:  # If frame is in color
            frame_out = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        else:
            frame_out = frame

        if frame_count in key_frames:
            frame_out = yolo_annotate(frame_out)

        frames.append(frame_out)
        frame_count += 1
    cap.release()

    return process_video_and_display(frames, fps)

def clear_key_frame_list():
    """Reset the module-level key-frame index list to empty by rebinding it."""
    global key_frame_list
    key_frame_list = []


def generate_responses_webcam():
    """Placeholder for a live webcam/stream processing path (not implemented)."""
    return None


    
# Is there anyone who is not wearing a helmet/hard hat? 
# Is there anyone who is not wearing a safety belt/gear? / Did he wear a safety belt? Just answer yes or no.
# Is there anyone who is crossing the fence? / Is there any security fence in the image? Just answer yes or no.
# Can you see the skin of his arm?
# Is there any ladder in the image? Just answer yes or no.
# Gradio UI: an image Q&A panel on top and a video analysis panel below.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🌔 视觉-语言多模态安全检测
        """
    )

    gr.Markdown(
        """
        ---
        ## 处理图片
        """
    )

    # --- Image panel: prompt + image in, streamed markdown answer out. ---
    with gr.Row():
        prompt = gr.Textbox(label="提示词输入", placeholder="在这里输入您的问题...", scale=4)
        submit = gr.Button("提交")
    with gr.Row():
        img = gr.Image(type="pil", label="上传一个视频")
        with gr.Column(): 
            output = gr.Markdown(label="回复结果")
            ann = gr.Image(visible=False, label="图像标注结果")

    submit.click(answer_question, [img, prompt], output)
    prompt.submit(answer_question, [img, prompt], output)
    # Whenever the streamed answer changes, try to parse and draw a bbox.
    output.change(process_answer, [img, output], ann, show_progress=False)

    gr.Markdown(
        """
        ---
        ## 处理视频
        """
    )
    # --- Video panel: per-second VLM answers plus an annotated re-encode. ---
    with gr.Row():
        prompt2 = gr.Textbox(label="提示词输入", placeholder="是否存在没有佩戴安全帽的人员...", scale=4)
        submit2 = gr.Button("提交")
    
    with gr.Row():
        video = gr.Video(label="上传一个视频")
        with gr.Column():
            processed_video = gr.Video(value=None, label="视频标注结果")
    
    with gr.Row():
        output2 = gr.Markdown(label="回复结果")
        ann2 = gr.Image(visible=False, label="图像标注结果")
    
    # NOTE(review): `video` is a gr.Video component object at build time,
    # never a str, so `determine` is always False here and
    # generate_responses_webcam is unreachable.  If URL/webcam input was
    # intended, this check has to happen inside the handler at event time —
    # confirm the intent.
    determine = isinstance(video, str) and video.startswith(('http://', 'https://'))

    # After streaming answers finishes, re-encode the video with YOLO
    # annotations on the frames collected in key_frame_list.
    submit2.click(
            generate_responses_webcam if determine else generate_responses_upload, 
            [video, prompt2], 
            output2
        ).then(
            update_processed_video, 
            video, 
            processed_video
        )
    
    prompt2.submit(
            generate_responses_webcam if determine else generate_responses_upload, 
            [video, prompt2], 
            output2
        ).then(
            update_processed_video, 
            video, 
            processed_video
        )
    output2.change(process_answer2, [video, output2], ann2, show_progress=True)



demo.queue().launch(debug=True)
