
import argparse
import torch
import gradio as gr
from moondream import detect_device, LATEST_REVISION
from threading import Thread
from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
from PIL import ImageDraw, Image
import re
from torchvision.transforms.v2 import Resize
import cv2
import time
import queue
from ultralytics import YOLO
from moviepy.editor import ImageSequenceClip



frame_count = 10
frame_interval = 60



def yolo_annotate(img):
    """Run YOLO-World person detection on *img* and draw the boxes.

    Parameters
    ----------
    img : numpy.ndarray
        Image to run detection on (channel order as expected by the
        YOLO predictor — frames here come from the cv2 pipeline).

    Returns
    -------
    numpy.ndarray
        BGR image with predictions plotted (``results[0].plot()``).
    """
    # Load and configure the model once, then cache it on the function
    # object. The original re-imported YOLO (already imported at module
    # scope) and re-created the model on EVERY call — very expensive when
    # invoked per frame.
    model = getattr(yolo_annotate, "_model", None)
    if model is None:
        model = YOLO("yolov8s-world.pt")
        # Restrict the open-vocabulary model to the 'person' class only.
        model.set_classes(['person'])
        yolo_annotate._model = model

    # Low confidence threshold (0.05) keeps weak detections; IoU 0.45 is
    # the NMS overlap threshold.
    results = model.predict(img, conf=0.05, iou=0.45)
    return results[0].plot()  # plot a BGR numpy array of predictions

def write_frames_to_video(frame_list, output_file, fps=30):
    """Encode *frame_list* into an H.264 MP4 file at *output_file*.

    Parameters
    ----------
    frame_list : list of numpy.ndarray
        Frames to encode. NOTE(review): moviepy assumes RGB channel order;
        frames decoded by cv2 are BGR unless swapped upstream — confirm.
    output_file : str
        Path of the video file to write.
    fps : int, optional
        Output frame rate (default 30).

    Returns
    -------
    str
        The path of the written video file. Bug fix: the original
        returned None, so callers using the return value as a path
        received None.
    """
    clip = ImageSequenceClip(frame_list, fps=fps)
    clip.write_videofile(output_file, codec='libx264')
    return output_file


def process_video_and_display(frames, fps):
    """Write *frames* to "output_video.mp4" at *fps* and return that path.

    Parameters
    ----------
    frames : list of numpy.ndarray
        Frames to encode.
    fps : float
        Frame rate for the output clip.

    Returns
    -------
    str
        Path of the written video. Bug fix: the original returned the
        result of write_frames_to_video(), which was None; the known
        output path is now returned explicitly.
    """
    output_file = "output_video.mp4"
    write_frames_to_video(frames, output_file, fps)
    return output_file

def update_processed_video(uploaded_video):
    """Read *uploaded_video*, YOLO-annotate a window of frames, re-encode.

    Frames whose index falls in a window of roughly ``frame_interval``
    frames centered on frame 10 are passed through ``yolo_annotate``;
    all other frames are copied through unchanged. The result is written
    via ``process_video_and_display``.

    Parameters
    ----------
    uploaded_video : str
        Path to the input video file.

    Returns
    -------
    str
        Path of the processed output video.
    """
    cap = cv2.VideoCapture(uploaded_video)
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = cap.get(cv2.CAP_PROP_FPS)  # preserve the source frame rate

    # Indices to annotate: ~frame_interval frames centered on frame 10,
    # clipped to the video length. NOTE(review): the center (10) is
    # hard-coded and happens to equal the module-level `frame_count`
    # constant — confirm whether they are meant to be the same value.
    window_center = 10
    half = int(frame_interval // 2)
    annotate_indices = set(range(max(0, window_center - half + 1),
                                 min(window_center + half - 1,
                                     int(total_frames))))

    frames = []
    # Renamed from `frame_count` to avoid shadowing the module constant.
    frame_idx = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame is None:
            # Defensive guard; cap.read() with ret == True normally
            # yields a valid frame.
            print("Warning: Skipping frame. Cannot load ")
            continue
        # Swap channel order for 3-channel frames. NOTE(review): cv2
        # decodes to BGR, so this swap actually produces RGB — which is
        # what moviepy expects downstream; the original comment saying
        # "convert to BGR" was misleading. RGB2BGR and BGR2RGB are the
        # same numeric swap, so behavior is unchanged.
        if len(frame.shape) == 3 and frame.shape[2] == 3:
            frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        else:
            frame_bgr = frame

        if frame_idx in annotate_indices:
            frame_bgr = yolo_annotate(frame_bgr)

        frames.append(frame_bgr)
        frame_idx += 1
    cap.release()

    return process_video_and_display(frames, fps)

if __name__ == "__main__":
    # Run the demo only when executed as a script; the original ran this
    # unconditionally at import time, which is a module-level side effect.
    update_processed_video("高空作业.mp4")