import sys
from collections import defaultdict

import cv2
from ultralytics import YOLO

from PyQt6.QtCore import Qt, QTimer, QUrl
from PyQt6.QtGui import QImage, QPixmap
# NOTE(review): QMediaContent was removed in Qt6/PyQt6 — this import raises
# ImportError at startup, and neither name is used anywhere in this file.
# Drop this line entirely.
from PyQt6.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt6.QtWidgets import (
    QApplication,
    QFileDialog,
    QLabel,  # was missing: VideoWidget.__init__ instantiates QLabel
    QMainWindow,
    QVBoxLayout,
    QWidget,
)

class VideoWidget(QWidget):
    """Widget that displays OpenCV BGR frames inside a centered QLabel."""

    def __init__(self):
        super().__init__()
        self.video_label = QLabel(self)
        # Fix: Qt.center() does not exist; PyQt6 keeps alignment flags in the
        # Qt.AlignmentFlag enum.
        self.video_label.setAlignment(Qt.AlignmentFlag.AlignCenter)

        self.layout = QVBoxLayout()
        self.layout.addWidget(self.video_label)
        self.setLayout(self.layout)

    def set_frame(self, frame):
        """Convert a BGR numpy frame to a QPixmap and show it, scaled to fit.

        Args:
            frame: H x W x 3 uint8 array in OpenCV's BGR channel order.
        """
        # Qt expects RGB channel order, OpenCV delivers BGR.
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Wrap the numpy buffer in a QImage. PyQt6 nests pixel formats under
        # QImage.Format (QImage.Format_RGB888 raises AttributeError in PyQt6).
        height, width, _channels = rgb_frame.shape
        bytes_per_line = 3 * width
        # .copy() detaches the QImage from rgb_frame's buffer: QImage does not
        # own the data, and rgb_frame goes out of scope when this method returns.
        q_image = QImage(
            rgb_frame.data, width, height, bytes_per_line,
            QImage.Format.Format_RGB888,
        ).copy()

        pixmap = QPixmap.fromImage(q_image)

        # Fit the label without distortion. PyQt6 nests the flag under
        # Qt.AspectRatioMode (bare Qt.KeepAspectRatio raises AttributeError).
        pixmap = pixmap.scaled(
            self.video_label.size(),
            Qt.AspectRatioMode.KeepAspectRatio,
        )

        self.video_label.setPixmap(pixmap)

class VideoProcessingApp(QMainWindow):
    """Main window that runs YOLOv8 on every frame of a video file.

    Drives frame grabbing with a QTimer, displays the annotated frames in a
    VideoWidget, and accumulates per-class detection counts. When the video
    ends, prints each class's share of all detections.
    """

    def __init__(self, video_path="images/008.mp4", model_path="yolov8n.pt"):
        """Create the window and start processing.

        Args:
            video_path: Path of the video file to process (previously
                hard-coded; default preserves the original behavior).
            model_path: Ultralytics YOLO weights file to load.
        """
        super().__init__()

        self.setWindowTitle('YOLOv8 Video Processing')
        self.setGeometry(100, 100, 800, 600)

        self.video_widget = VideoWidget()
        self.setCentralWidget(self.video_widget)

        self.model = YOLO(model_path)
        self.cap = cv2.VideoCapture(video_path)
        if not self.cap.isOpened():
            # Best-effort warning; the timer callback handles a closed capture.
            print(f"Warning: could not open video source: {video_path}")

        self.total_detections = 0             # running count of all boxes seen
        self.class_counts = defaultdict(int)  # detections per class name

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.process_video_frame)
        self.timer.start(33)  # Update every 33 milliseconds (~30 fps)

    def process_video_frame(self):
        """Timer callback: read one frame, run YOLO, display, tally counts.

        On end-of-video (read failure), releases the capture, stops the
        timer, and prints the class-percentage summary.
        """
        if not self.cap.isOpened():
            return
        success, frame = self.cap.read()
        if not success:
            # End of stream (or read error): shut down and summarize.
            self.cap.release()
            self.timer.stop()
            self.calculate_class_percentages()
            print("Video processing completed.")
            return

        results = self.model(frame)

        for box in results[0].boxes:
            class_name = self.model.names[int(box.cls.item())]
            confidence = float(box.conf)
            self.class_counts[class_name] += 1
            print(f"Class: {class_name}, Confidence: {confidence}")

        self.total_detections += len(results[0].boxes)

        # plot() returns the frame with boxes/labels drawn on it.
        annotated_frame = results[0].plot()
        self.video_widget.set_frame(annotated_frame)

    def calculate_class_percentages(self):
        """Print each class's fraction of all detections.

        Fix: guard against ZeroDivisionError when the video produced no
        detections at all.
        """
        if self.total_detections == 0:
            print("Class Percentages:")
            print("(no detections recorded)")
            return
        class_percentages = {
            class_name: count / self.total_detections
            for class_name, count in self.class_counts.items()
        }
        print("Class Percentages:")
        for class_name, percentage in class_percentages.items():
            print(f"{class_name}: {percentage:.2%}")

# Script entry point: build the Qt application, show the main window, and
# hand control to the event loop until the user closes the window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = VideoProcessingApp()
    window.show()
    sys.exit(app.exec())
