import os

# --- FIX for OMP: Error #15 ---
# This MUST run before cv2/ultralytics (and, transitively, torch) are
# imported, because they load their OpenMP runtime at import time.
# It prevents a crash or warning when multiple OpenMP runtimes are loaded.
# NOTE: intentionally placed ahead of the third-party imports below.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

import cv2
from ultralytics import YOLO

# --- Configuration Parameters ---

# Path to your trained YOLO model
MODEL_PATH = 'best.pt'
# Path to the input video file
VIDEO_PATH = './trainvideo/panelvedio.mp4'
# Directory to save the output results
OUTPUT_DIR = 'runs/detect'
# Filename for the output video
OUTPUT_VIDEO_NAME = 'output_panelvedio_tracked.mp4'

# --- KEY PARAMETER TO TUNE ---
# Confidence threshold: Only detections with a confidence score higher than this will be shown.
# If you get "(no detections)", TRY LOWERING THIS VALUE, for example to 0.15 or 0.1
CONFIDENCE_THRESHOLD = 0.25

# --- Main Processing Function ---

def main():
    """Load the YOLO model, run detection+tracking on the input video, and save the result.

    Reads the module-level configuration (MODEL_PATH, VIDEO_PATH, OUTPUT_DIR,
    OUTPUT_VIDEO_NAME, CONFIDENCE_THRESHOLD). Writes the annotated video to
    OUTPUT_DIR/OUTPUT_VIDEO_NAME and shows a live preview window; press 'q'
    in the window to stop early. Prints a message and returns early on any
    setup failure (bad model, missing/unreadable video, unwritable output).
    """
    # 1. Load the YOLO model
    print(f"Loading model from: {MODEL_PATH}")
    try:
        model = YOLO(MODEL_PATH)
    except Exception as e:
        print(f"Error: Failed to load the model. Please check if '{MODEL_PATH}' is a valid Ultralytics YOLO model file.")
        print(f"Details: {e}")
        return

    # 2. Open the video file
    print(f"Opening video file: {VIDEO_PATH}")
    if not os.path.exists(VIDEO_PATH):
        print(f"Error: Video file not found at '{VIDEO_PATH}'")
        return

    cap = cv2.VideoCapture(VIDEO_PATH)
    if not cap.isOpened():
        print("Error: Could not open video file.")
        return

    # 3. Get video properties to create the output video writer
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Some containers report an FPS of 0 (or NaN); a VideoWriter created
    # with fps <= 0 silently produces a broken file, so fall back to 30.
    if not fps or fps <= 0:
        print("Warning: Could not read FPS from the source video; defaulting to 30.")
        fps = 30.0

    # 4. Set up the output video writer
    # Ensure the output directory exists
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    output_video_path = os.path.join(OUTPUT_DIR, OUTPUT_VIDEO_NAME)

    # Define the codec and create VideoWriter object. 'mp4v' is a good choice for .mp4 files.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height))
    if not out.isOpened():
        print(f"Error: Could not open output video for writing: {output_video_path}")
        cap.release()
        return

    print("Starting video inference...")
    print("Press 'q' in the display window to quit early.")

    # 5. Loop through video frames. The try/finally guarantees the capture,
    # writer, and preview window are released even if inference raises.
    try:
        while True:
            # Read a frame from the video
            success, frame = cap.read()
            if not success:
                # End of video or read error
                print("Reached end of video or failed to read frame.")
                break

            # --- Perform Inference and Tracking ---
            # model.track() performs detection and assigns a unique ID to each
            # object across frames. 'persist=True' tells the tracker the current
            # frame is consecutive to the previous one; 'conf' sets the
            # detection confidence threshold.
            results = model.track(source=frame, persist=True, conf=CONFIDENCE_THRESHOLD)

            # results[0].plot() draws the boxes, labels, and track IDs on the frame.
            annotated_frame = results[0].plot()

            # Display the resulting frame in a window
            cv2.imshow('YOLOv8 Tracking - Press Q to Quit', annotated_frame)

            # Write the annotated frame to the output video file
            out.write(annotated_frame)

            # Allow quitting by pressing 'q'
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # 6. Clean up and release resources
        print("Processing finished. Releasing resources...")
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    print(f"Successfully saved tracked video to: {output_video_path}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()