import argparse
import torch
import gradio as gr
from moondream import detect_device, LATEST_REVISION
from threading import Thread
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
from PIL import ImageDraw, Image
import re
from torchvision.transforms.v2 import Resize
import cv2
import time
import queue
import logging
from moviepy.editor import ImageSequenceClip
import random
from ultralytics import YOLO


from llavamodel.model.builder import load_pretrained_model
from llavamodel.mm_utils import get_model_name_from_path
from llavamodel.eval.run_llava import eval_model

# YOLO-World open-vocabulary detector, used to draw person boxes on key frames.
yolo_model = YOLO("yolov8s-world.pt")
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Frame indices flagged as "interesting" by the VLM; filled by process_frame()
# and consumed by update_processed_video().
key_frame_list = []

parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true")  # force CPU inference
args = parser.parse_args()



# Pick the compute device: --cpu forces CPU/float32, otherwise let moondream's
# detect_device() choose the device and a matching dtype.
if args.cpu:
    device = torch.device("cpu")
    dtype = torch.float32
else:
    device, dtype = detect_device()
    if device != torch.device("cpu"):
        logger.info("Using device:{}".format(device))
        logger.info("If you run into issues, pass the `--cpu` flag to this script.")


model_path = "liuhaotian/llava-v1.5-7b"

# Load the LLaVA v1.5 7B checkpoint once at import time; these handles are
# shared by every request handler in this script.
tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path=model_path,
    model_base=None,
    model_name=get_model_name_from_path(model_path)
)
logger.info(f"Initialize model finished!")
# Previous moondream2 backend, kept for reference:
# model_id = "vikhyatk/moondream2"
# tokenizer = AutoTokenizer.from_pretrained(model_id, revision=LATEST_REVISION)
# moondream = AutoModelForCausalLM.from_pretrained(
#     model_id, trust_remote_code=True, revision=LATEST_REVISION, torch_dtype=dtype
# ).to(device=device)
# moondream.eval()

# Example image prompt (zh): check whether anyone in the image is missing a
# hard hat or safety harness, or is climbing over a railing.


def answer_question(img, prompt):
    """Ask the LLaVA model `prompt` about a single PIL image and return its reply.

    Args:
        img: PIL image from the Gradio image widget.
        prompt: user question text.

    Returns:
        Whatever eval_model returns — presumably the decoded answer string;
        TODO confirm against llavamodel.eval.run_llava.
    """
    img = img.convert("RGB")  # normalize to 3-channel input
    # (The original constructed a TextIteratorStreamer here that was never
    # passed anywhere — dead allocation, removed.)
    answer = eval_model(
        model_path=model_path,
        model_base=None,
        model_name=get_model_name_from_path(model_path),
        query=prompt,
        conv_mode=None,
        image_file=img,
        sep=",",
        temperature=0,  # deterministic decoding
        top_p=None,
        num_beams=1,
        max_new_tokens=512,
        tokenizer=tokenizer,
        model=model,
        image_processor=image_processor,
        context_len=context_len
    )
    return answer

logger.info(f"Initialize answer_question finished!")



def extract_floats(text):
    """Return the first "[x1, y1, x2, y2]" float quadruple found in *text*.

    Only decimal literals with digits on both sides of the dot (optional
    leading minus) are accepted, so e.g. "[1, 2, 3, 4]" does not match.
    Returns a list of four floats, or None when no quadruple is present.
    """
    num = r"(-?\d+\.\d+)"
    box_re = re.compile(
        r"\[\s*" + num + r"\s*,\s*" + num + r"\s*,\s*" + num + r"\s*,\s*" + num + r"\s*\]"
    )
    found = box_re.search(text)
    if not found:
        return None
    return [float(piece) for piece in found.groups()]
logger.info(f"Initialize extract_floats finished!")

def extract_bbox(text):
    """Parse a normalized (x1, y1, x2, y2) bbox tuple out of model text.

    Returns the tuple, or None when no coordinate quadruple is found.
    """
    # Parse once and reuse; the original ran extract_floats (and its regex) twice.
    coords = extract_floats(text)
    if coords is None:
        return None
    x1, y1, x2, y2 = coords
    return (x1, y1, x2, y2)
logger.info(f"Initialize extract_bbox finished!")

def process_answer(img, answer):
    """If `answer` contains a normalized bbox, draw it on a resized copy of `img`.

    Args:
        img: PIL image the question was asked about.
        answer: model answer text, possibly containing "[x1, y1, x2, y2]".

    Returns:
        gr.update(...) showing the annotated image, or hiding the component
        when no bbox was found.
    """
    box = extract_bbox(answer)  # parse once; the original parsed twice
    if box is None:
        return gr.update(visible=False, value=None)
    x1, y1, x2, y2 = box
    # Resize(768): torchvision int-size semantics — shorter side scaled to 768.
    draw_image = Resize(768)(img)
    width, height = draw_image.size
    # bbox coordinates are normalized [0, 1]; scale to pixel space.
    x1, x2 = int(x1 * width), int(x2 * width)
    y1, y2 = int(y1 * height), int(y2 * height)
    ImageDraw.Draw(draw_image).rectangle((x1, y1, x2, y2), outline="red", width=3)
    return gr.update(visible=True, value=draw_image)
logger.info(f"Initialize process_answer finished!")

# Example video prompts (zh): "Is anyone not wearing a safety harness? Answer
# only yes or no." / "Is anyone climbing over the railing? Answer yes or no."
def process_frame(video_path, prompt, answer_queue):
    """Sample ~1 frame/second from the video, ask the VLM about each sample,
    and stream the accumulated answers through `answer_queue`.

    Side effects:
      - extends the global `key_frame_list` with a ~1s window of frame indices
        around every frame whose answer contains "Yes"/"是" (used later by
        update_processed_video for YOLO annotation);
      - puts a final None sentinel on `answer_queue` when the video ends.
    """
    global key_frame_list
    video = cv2.VideoCapture(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    # One sampled frame per second. Guard fps < 1: the original int(fps) could
    # be 0 and crash the modulo below.
    frame_interval = max(int(fps), 1)
    total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT)

    success, frame = video.read()
    frame_count = 0
    answers = []

    # End-of-stream is reported via the `success` flag; the original also
    # wrapped the body in a no-op `except EOFError`, which cv2 never raises.
    while success:
        if frame_count % frame_interval == 0:
            # Timestamp of this frame (avoid ZeroDivisionError on fps == 0).
            timestamp = frame_count / fps if fps else 0.0
            minutes, seconds = divmod(timestamp, 60)
            timestamp_str = f"{int(minutes):02d}:{int(seconds):02d}"
            logger.info("Processing frame {} at timestamp {}".format(frame_count, timestamp_str))

            # cv2 decodes frames as BGR; convert so the model sees true colors
            # (the original passed BGR straight into Image.fromarray).
            image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            rgb_frame = image.convert('RGB')

            answer = eval_model(
                model_path=model_path,
                model_base=None,
                model_name=get_model_name_from_path(model_path),
                query=prompt,
                conv_mode=None,
                image_file=rgb_frame,
                sep=",",
                temperature=0,
                top_p=None,
                num_beams=1,
                max_new_tokens=512,
                tokenizer=tokenizer,
                model=model,
                image_processor=image_processor,
                context_len=context_len
            )
            if "Yes" in answer or "是" in answer:
                # Mark ~1 second of surrounding frames for later annotation.
                lo = max(0, frame_count - frame_interval // 2 + 1)
                hi = min(frame_count + frame_interval // 2 - 1, int(total_frames))
                key_frame_list.extend(range(lo, hi))

            # Defensive string coercion. The original converted tensors to a
            # list and then called .replace() on it, which would raise.
            if isinstance(answer, torch.Tensor):
                answer = str(answer.cpu().tolist())
            pure_answer = answer.replace('\n\n', ' ')
            answers.append(f"🌔 时间: {timestamp_str} (第 {frame_count} 帧):\n\n > {pure_answer}")

            answer_queue.put("\n\n".join(answers))

        success, frame = video.read()
        frame_count += 1

    answer_queue.put(None)  # completion sentinel for the consumer
logger.info(f"Initialize process_frame finished!")


def generate_responses_upload(video_path, prompt):
    """Generator: run process_frame in a background thread and yield each
    incremental answer string it produces, stopping at the None sentinel."""
    clear_key_frame_list()  # reset per-run key-frame bookkeeping
    answer_queue = queue.Queue()
    worker = Thread(target=process_frame, args=(video_path, prompt, answer_queue))
    worker.start()
    # queue.get blocks until the producer puts something; iter(callable,
    # sentinel) keeps yielding results until the first None arrives.
    yield from iter(answer_queue.get, None)
logger.info(f"Initialize generate_responses_upload finished!")

def process_answer2(video, answer):
    """Video-tab counterpart of process_answer: draw a parsed bbox if present.

    NOTE(review): `video` is whatever the gr.Video component passes (typically
    a file path, not a PIL image), so the Resize/ImageDraw path below likely
    fails for real videos — flagged, not changed.
    """
    box = extract_bbox(answer)  # parse once; the original ran the regex twice
    if box is None:
        return gr.update(visible=False, value=None)
    x1, y1, x2, y2 = box
    # TODO: processing each frame bbox
    draw_image = Resize(768)(video)
    width, height = draw_image.size
    # Normalized coordinates scaled to pixel space.
    x1, x2 = int(x1 * width), int(x2 * width)
    y1, y2 = int(y1 * height), int(y2 * height)
    ImageDraw.Draw(draw_image).rectangle((x1, y1, x2, y2), outline="red", width=3)
    return gr.update(visible=True, value=draw_image)
logger.info(f"Initialize process_answer2 finished!")

def yolo_annotate(img):
    """Detect people in `img` with YOLO-World and return the rendered result.

    Returns the first prediction plotted onto a numpy array (BGR, per
    ultralytics' plot()).
    """
    # Restrict the open-vocabulary detector to the single class we care about.
    yolo_model.set_classes(['person'])
    detections = yolo_model.predict(img, conf=0.5, iou=0.45)
    return detections[0].plot()  # plot a BGR numpy array of predictions
logger.info(f"Initialize yolo_annotate finished!")

def write_frames_to_video(frame_list, output_file, fps=30):
    """Encode `frame_list` into an H.264 mp4 at `output_file` via moviepy."""
    ImageSequenceClip(frame_list, fps=fps).write_videofile(output_file, codec='libx264')

logger.info(f"Initialize write_frames_to_video finished!")

def process_video_and_display(frames, fps):
    """Write the annotated frames to a fixed mp4 path and return that path."""
    out_path = "output_video.mp4"
    write_frames_to_video(frames, out_path, fps)
    return out_path
logger.info(f"Initialize process_video_and_display finished!")

def update_processed_video(uploaded_video):
    """Re-encode the uploaded video, YOLO-annotating every key frame.

    Reads the global `key_frame_list` filled by process_frame and returns the
    path of the written output video.
    """
    cap = cv2.VideoCapture(uploaded_video)
    fps = cap.get(cv2.CAP_PROP_FPS)  # Get the frame rate of the video
    frames = []
    frame_count = 0
    # O(1) membership tests; key_frame_list can hold thousands of indices and
    # the original did an O(n) list scan for every single frame.
    key_frames = set(key_frame_list)
    logger.info("expended key_frame_list, {}".format(key_frame_list))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame is None:
            # Defensive: cv2 normally signals failure via `ret`, but skip a
            # None frame rather than crash downstream.
            logger.warning("Warning: Skipping frame. Cannot load ")
            continue
        # cv2 yields BGR; swap channels so moviepy receives RGB. (BGR2RGB and
        # the original's RGB2BGR are the same channel swap — this name is the
        # honest one.)
        if len(frame.shape) == 3 and frame.shape[2] == 3:
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            frame_rgb = frame

        if frame_count in key_frames:
            frame_rgb = yolo_annotate(frame_rgb)

        frames.append(frame_rgb)
        frame_count += 1
    cap.release()

    video_path = process_video_and_display(frames, fps)
    return video_path

def clear_key_frame_list():
    """Reset the module-level key_frame_list before processing a new video."""
    global key_frame_list
    key_frame_list = []


def generate_responses_webcam():
    """Placeholder for live-webcam/stream processing; not implemented yet."""
    pass

logger.info(f"Initialize allabove finished!")
    
# Example English prompts for the image tab:
#   Is there anyone not wearing a helmet / hard hat?
#   Is there anyone not wearing a safety belt / harness?
#   Is there anyone crossing the fence / railing?
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🌔 视觉-语言多模态安全检测
        """
    )

    gr.Markdown(
        """
        ---
        ## 处理图片
        """
    )

    # --- Image tab: single-image Q&A plus optional bbox annotation ---
    with gr.Row():
        prompt = gr.Textbox(label="提示词输入", placeholder="在这里输入您的问题...", scale=4)
        submit = gr.Button("提交")
    with gr.Row():
        img = gr.Image(type="pil", label="上传一个视频")
        with gr.Column(): 
            output = gr.Markdown(label="回复结果")
            ann = gr.Image(visible=False, label="图像标注结果")

    # Answer on click/enter; whenever the answer text changes, try to parse and
    # draw a bbox from it.
    submit.click(answer_question, [img, prompt], output)
    prompt.submit(answer_question, [img, prompt], output)
    output.change(process_answer, [img, output], ann, show_progress=False)

    gr.Markdown(
        """
        ---
        ## 处理视频
        """
    )
    # --- Video tab: per-second VLM Q&A, then YOLO-annotated re-encode ---
    with gr.Row():
        prompt2 = gr.Textbox(label="提示词输入", placeholder="是否存在没有佩戴安全帽的人员...", scale=4)
        submit2 = gr.Button("提交")
    
    with gr.Row():
        video = gr.Video(label="上传一个视频")
        with gr.Column():
            processed_video = gr.Video(value=None, label="视频标注结果")
    
    with gr.Row():
        output2 = gr.Markdown(label="回复结果")
        ann2 = gr.Image(visible=False, label="图像标注结果")
    
    # NOTE(review): at build time `video` is a gr.Video component object, never
    # a str, so `determine` is always False and generate_responses_webcam is
    # never selected — the webcam branch is effectively dead.
    determine = isinstance(video, str) and video.startswith(('http://', 'https://'))

    # Stream per-second answers, then re-encode the video with key frames
    # annotated once streaming finishes.
    submit2.click(
            generate_responses_webcam if determine else generate_responses_upload, 
            [video, prompt2], 
            output2
        ).then(
            update_processed_video, 
            video, 
            processed_video
        )
    
    prompt2.submit(
            generate_responses_webcam if determine else generate_responses_upload, 
            [video, prompt2], 
            output2
        ).then(
            update_processed_video, 
            video, 
            processed_video
        )
    output2.change(process_answer2, [video, output2], ann2, show_progress=True)



demo.queue().launch(debug=True)
