# Standard library
import os
import sys

# Project modules (wildcard import kept in its original position so any
# name shadowing behaves exactly as before)
from src.qt.stream.video_capture import CameraCaptureThread
from src.qt.stream.visualize import VideoVisualizationThread
# from src.qt.stream.ai_worker import AiWorkerThread

from libdet import *
from src.ui.main_window import Ui_MainWindow
from src.qt.video.video_worker import FileProcessThread

# Third-party
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QTimer
from PyQt5.QtGui import QImage, QPixmap, QPainter, QIcon
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMenu, QAction

import numpy as np
import cv2  # the code below uses the plain `cv2` name, not the `cv` alias
import cv2 as cv
import torch

class DetThread(QThread):
    """Worker that runs YOLO inference on a video source.

    Instead of a conventional ``QThread.start()`` loop, ``run`` is driven by
    a ``QTimer`` whose interval is recomputed each pass from the measured
    inference time.  Results are pushed to the GUI through Qt signals.
    """

    send_img = pyqtSignal(np.ndarray)    # frame to display (raw or annotated)
    send_raw = pyqtSignal(np.ndarray)    # raw frame (currently unused)
    send_result = pyqtSignal(list)       # [detection text, fps]
    # emit: detecting/pause/stop/finished/error msg
    send_msg = pyqtSignal(str)
    send_percent = pyqtSignal(int)       # playback progress (currently unused)
    send_fps = pyqtSignal(str)           # fps string (currently unused)

    def __init__(self):
        super(DetThread, self).__init__()
        # The timer re-arms run(); its interval adapts to inference speed.
        self.timer = QTimer()
        self.timer.timeout.connect(self.run)

        self.det = Predict("./weights/yolo11s.pt")  # init yolo model
        print("DetThread init")

        self.play_status = False   # True while frames should be processed
        self.stop_status = False   # set by the GUI to halt the timer

        self.conf_thr = 0.3
        # Overlay colour channels in [0, 1], forwarded to Predict.predict().
        self.set_color_R = 0.99
        self.set_color_G = 0.99
        self.set_color_B = 0.00

        self.source = "0"                  # camera index (as str) or file path
        self.frame_interval = 0
        self.model_name = "yolo11s.pt"
        # Initialize weights so a stray change_model_status=True cannot raise
        # AttributeError before MainWindow.choose_model() has run.
        self.weights = f"./pt/{self.model_name}"
        self.ai_task = "object_detection"
        self.tracker_name = "deepsort"
        self.control_conf_status = 0.75    # confidence threshold handed to Predict
        self.img_choose = "detection"      # "origin" -> raw frame, else annotated
        self.set_timer = 3                 # user-adjustable timer scale factor

        self.jump_out = False
        self.change_model_status = False   # set together with `weights` to hot-swap
        self.is_continue = False

    @torch.no_grad()
    def run(self,
            imgsz=640,  # inference size (pixels)
            max_det=1000,  # maximum detections per image
            device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
            view_img=True,  # show results
            save_txt=False,  # save results to *.txt
            save_conf=False,  # save confidences in --save-txt labels
            save_crop=False,  # save cropped prediction boxes
            nosave=False,  # do not save images/videos
            classes=None,  # filter by class: --class 0, or --class 0 2 3
            agnostic_nms=False,  # class-agnostic NMS
            augment=False,  # augmented inference
            visualize=False,  # visualize features
            update=False,  # update all models
            project='runs/detect',  # save results to project/name
            name='exp',  # save results to project/name
            exist_ok=False,  # existing project/name ok, do not increment
            line_thickness=3,  # bounding box thickness (pixels)
            hide_labels=False,  # hide labels
            hide_conf=False,  # hide confidences
            half=False,  # use FP16 half-precision inference
            ):
        """Process one frame: handle stop/model-swap flags, then predict & emit.

        The keyword parameters above are currently unused; they are kept for
        interface compatibility (the timer invokes run() with defaults).
        """
        if self.stop_status:
            # Halt the re-arming timer; no further frames are processed.
            self.timer.stop()

        if self.change_model_status:
            self.det.change_model(self.weights)
            print("Change model")
            self.change_model_status = False

        if self.play_status:
            try:
                loop_start = cv2.getTickCount()  # start timing
                im1, im0, text = self.det.predict(
                    self.source, self.control_conf_status,
                    self.set_color_R, self.set_color_G, self.set_color_B)
                total_time = (cv2.getTickCount() - loop_start) / cv2.getTickFrequency()
                # BUGFIX: the former unguarded `int(1 / total_time)` raised
                # ZeroDivisionError on very fast iterations, which skipped
                # timer.start() below and silently stalled the whole loop.
                fps = int(1 / total_time) if total_time > 0 else 30
                print(f"Det ing ... FPS is {fps}")
                interval = int(self.set_timer * 5000 / fps)  # re-arm based on throughput
                self.timer.start(interval)

                if self.img_choose == "origin":
                    self.send_img.emit(im0)
                else:
                    self.send_img.emit(im1)
                self.send_result.emit([text, fps])
            except Exception as e:
                print(f"Error: {e}")

class MainWindow(QMainWindow, Ui_MainWindow):
    """Main GUI window.

    Wires the widgets generated by ``Ui_MainWindow`` to a ``DetThread`` that
    performs detection, and renders its output frames and per-detection
    statistics in the display label and results table.
    """

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)

        # NOTE(review): no-op — pyqtSignal only works as a *class* attribute;
        # this unbound instance attribute is never connected or emitted.
        self.send_img = pyqtSignal(QImage)

        self.setupUi(self)
        self.det_thread = DetThread() # pending rework
        # self.camera_thread = CameraCaptureThread()
        self.display_thread = VideoVisualizationThread()
        # self.file_process_thread = FileProcessThread
        # self.screen_size = (self.label_display.width(), self.label_display.height

        self.play_status = True

        
        # Default inference parameters; the sliders below mirror some of
        # these onto the detection thread.
        self.conf_thr = 0.3
        self.iou_thr = 0.45
        self.frame_interval = 0
        self.model_name = "yolo11s.pt"
        self.ai_task = "object_detection"
        self.tracker_name = "deepsort"

        self.jump_out = False
        self.change_model_status = False
        self.is_continue = False

        self.init_slots()
        self.buttons_states("waiting_for_setting")
    

    def init_slots(self):
        """Connect widget signals to their handlers and populate the model list."""
        self.radioButton_det.toggled.connect(lambda: self.get_ai_task(self.radioButton_det))
        self.radioButton_pose.toggled.connect(lambda: self.get_ai_task(self.radioButton_pose))
        self.radioButton_seg.toggled.connect(lambda: self.get_ai_task(self.radioButton_seg))
        self.doubleSpinBox_conf.valueChanged.connect(lambda x: self.update_parameter(x, 'doubleSpinBox_conf'))
        self.doubleSpinBox_interval.valueChanged.connect(lambda x: self.update_parameter(x, 'doubleSpinBox_interval'))
        self.doubleSpinBox_iou.valueChanged.connect(lambda x: self.update_parameter(x, 'doubleSpinBox_iou'))
        self.horizontalSlider_conf.valueChanged.connect(lambda x: self.update_parameter(x, 'horizontalSlider_conf'))
        self.horizontalSlider_interval.valueChanged.connect(lambda x: self.update_parameter(x, 'horizontalSlider_interval'))
        self.horizontalSlider_iou.valueChanged.connect(lambda x: self.update_parameter(x, 'horizontalSlider_iou'))
        self.horizontalSlider_timer.valueChanged.connect(lambda x: self.update_parameter(x, 'horizontalSlider_timer'))
        self.comboBox_model.currentTextChanged.connect(self.choose_model)
        self.comboBox_tracker.currentTextChanged.connect(self.choose_tracker)
        self.pushButton_cam.clicked.connect(self.process_camera)
        self.pushButton_file.clicked.connect(self.process_file)
        self.pushButton_stop.clicked.connect(self.stop_video)
        self.pushButton_play.clicked.connect(self.play_or_pause)

        # Refresh the list of .pt model files, sorted smallest-first.
        # NOTE(review): `os` is not imported by this file directly; it is
        # presumably re-exported via `from libdet import *` — confirm.
        self.comboBox_model.clear()
        self.pt_list = os.listdir('./pt')
        self.pt_list = [file for file in self.pt_list if file.endswith('.pt')]
        self.pt_list.sort(key=lambda x: os.path.getsize('./pt/'+x))
        self.comboBox_model.clear()
        self.comboBox_model.addItems(self.pt_list)
        
        # Wire detection-thread output to the display label and results table.
        self.det_thread.send_img.connect(lambda x: self.show_image(x, self.label_display))
        self.det_thread.send_result.connect(self.update_statistic_table)
    
    def play_or_pause(self):
        """Toggle the detection thread's play flag; (re)start its timer when playing."""
        self.det_thread.play_status = not self.det_thread.play_status
        print(f"Play or Pause: {self.det_thread.play_status}")
        if self.det_thread.play_status:
            self.det_thread.timer.start(30)  # kick off; run() then re-arms the interval
        
        # self.det_thread.play_status = not self.det_thread.play_status
        # print(f"Play or Pause: {self.det_thread.play_status}")
        # if self.det_thread.play_status:
        #     self.det_thread.start()
        #     self.display_thread.start()
    
    def resizeEvent(self, event:QtGui.QResizeEvent):
        """Intentionally a no-op; the resize-tracking logic is disabled below."""
        # self.screen_size = (self.label_display.width(), self.label_display.height())
        # self.display_thread.get_screen_size(self.screen_size)
        # self.file_process_thread.get_screen_size(self.screen_size)
        # QtWidgets.QMainWindow.resizeEvent(self, event)
        pass
        
    def update_parameter(self, x, flag):
        """Keep each spin box and its slider in sync and push values to det_thread.

        NOTE(review): despite their names, the conf/interval/iou sliders are
        currently repurposed to drive the overlay colour channels
        (set_color_R / set_color_B / set_color_G) on the detection thread.
        """
        if flag == 'doubleSpinBox_conf':
            self.horizontalSlider_conf.setValue(int(x*100))
            # self.conf_thr = float(x)
        elif flag == 'doubleSpinBox_interval':
            self.horizontalSlider_interval.setValue(int(x*100))
            # self.frame_interval = float(x)
            # self.file_process_thread.set_frame_interval(self.frame_interval)
        elif flag == 'doubleSpinBox_iou':
            self.horizontalSlider_iou.setValue(int(x*100))
            # self.det_thread.set_color_R = float(x)
        elif flag == 'horizontalSlider_conf':
            self.doubleSpinBox_conf.setValue(x/100)
            self.det_thread.set_color_R = float(x/100)
            # print(self.conf_thr)
        elif flag == 'horizontalSlider_interval':
            self.doubleSpinBox_interval.setValue(x/100)
            # self.frame_interval = float(x/100)
            self.det_thread.set_color_B = float(x/100)
        elif flag == 'horizontalSlider_iou':
            self.doubleSpinBox_iou.setValue(x/100)
            self.det_thread.set_color_G = float(x/100)
        elif flag == 'horizontalSlider_timer':
            # self.doubleSpinBox_timer.setValue(x/100)
            self.det_thread.set_timer = float(x/10)  # timer scale factor used by DetThread.run
            print(f"set timer is {self.det_thread.set_timer}")
        # if self.det_thread.play_status:
        #     self.ai_thread.set_confidence_threshold(self.conf_thr)
        #     self.ai_thread.set_iou_threshold(self.iou_thr)
        # if self.file_process_thread.isRunning:
        #     self.file_process_thread.set_confidence_threshold(self.conf_thr)
        #     self.file_process_thread.set_iou_threshold(self.iou_thr)

    def get_ai_task(self, btn):
        """Map the checked precision-level radio button to a confidence threshold.

        Button labels are runtime strings (do not change): '精密级' = precision
        grade, '标准级' = standard grade, '基础级' = basic grade.
        """
        # print(btn.text())
        # print("choose task!!!!!!!!!!!!!!")
        if btn.text() == '精密级':
            if btn.isChecked() == True:
                self.ai_task = "object_detection"
                self.det_thread.control_conf_status = 0.35
        elif btn.text() == '标准级':
            if btn.isChecked() == True:
                self.ai_task = "pose_detection"
                self.det_thread.control_conf_status = 0.55
        elif btn.text() == '基础级':
            if btn.isChecked() == True:
                self.ai_task = "segmentation"
                self.det_thread.control_conf_status = 0.75
    
    def choose_model(self):
        """Select a weights file from the combo box and flag a model hot-swap."""
        self.model_name = self.comboBox_model.currentText()
        print(self.model_name)
        self.det_thread.weights = f"./pt/{self.model_name}"
        self.det_thread.change_model_status = True  # DetThread.run picks this up
        # self.model_name = self.model_name.lower()
    
    def choose_tracker(self):
        """Choose which frame to display ('原图' = original, '检测图' = detection)."""
        # print(self.comboBox_tracker.currentText())
        self.tracker_name = self.comboBox_tracker.currentText()
        # self.tracker_name = self.tracker_name.lower()
        if self.tracker_name == "原图":
            self.det_thread.img_choose = "origin"
        elif self.tracker_name == "检测图":
            self.det_thread.img_choose = "detection"
            
    def buttons_states(self, work_state):
        """Enable/disable widgets according to the current workflow state.

        NOTE(review): the doubleSpinBox_interval / horizontalSlider_interval
        lines appear twice per branch, sometimes with conflicting values —
        the last call wins.
        """
        if work_state == "waiting_for_setting":
            self.radioButton_det.setDisabled(False)
            self.radioButton_pose.setDisabled(False)
            self.radioButton_seg.setDisabled(False)
            self.comboBox_model.setDisabled(False)
            self.comboBox_tracker.setDisabled(False)
            self.pushButton_cam.setDisabled(False)
            self.pushButton_file.setDisabled(False)
            self.pushButton_play.setDisabled(False)
            self.pushButton_stop.setDisabled(False)
            self.doubleSpinBox_conf.setDisabled(False)
            self.horizontalSlider_conf.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(False)
            self.horizontalSlider_interval.setDisabled(False)
            self.doubleSpinBox_iou.setDisabled(False)
            self.horizontalSlider_iou.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(False)
            self.horizontalSlider_interval.setDisabled(False)
        elif work_state == "processing_on_camera":
            # NOTE(review): missing parentheses — this attribute access is a
            # no-op; presumably `self.pushButton_play.click()` was intended.
            self.pushButton_play.click
            self.radioButton_det.setDisabled(True)
            self.radioButton_pose.setDisabled(True)
            self.radioButton_seg.setDisabled(True)
            self.comboBox_model.setDisabled(True)
            self.comboBox_tracker.setDisabled(True)
            self.pushButton_cam.setDisabled(True)
            self.pushButton_file.setDisabled(True)
            self.pushButton_play.setDisabled(True)
            self.pushButton_stop.setDisabled(False)
            self.doubleSpinBox_conf.setDisabled(False)
            self.horizontalSlider_conf.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(True)
            self.horizontalSlider_interval.setDisabled(False)
            self.doubleSpinBox_iou.setDisabled(False)
            self.horizontalSlider_iou.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(True)
            self.horizontalSlider_interval.setDisabled(True)
        elif work_state == "processing_on_file":
            self.radioButton_det.setDisabled(True)
            self.radioButton_pose.setDisabled(True)
            self.radioButton_seg.setDisabled(True)
            self.comboBox_model.setDisabled(True)
            self.comboBox_tracker.setDisabled(True)
            self.pushButton_cam.setDisabled(True)
            self.pushButton_file.setDisabled(True)
            self.pushButton_play.setDisabled(False)
            self.pushButton_stop.setDisabled(False)
            self.doubleSpinBox_conf.setDisabled(False)
            self.horizontalSlider_conf.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(False)
            self.horizontalSlider_interval.setDisabled(False)
            self.doubleSpinBox_iou.setDisabled(False)
            self.horizontalSlider_iou.setDisabled(False)
            self.doubleSpinBox_interval.setDisabled(False)
            self.horizontalSlider_interval.setDisabled(False)
    
    def process_camera(self):
        """Prompt for a camera ID / RTSP URL and point det_thread at the camera."""
        video_source = self.get_stream_source()
        print("SOURCE", video_source)
        if video_source is not None:
            # NOTE(review): the user-entered source is discarded — the source
            # is hard-coded to camera "0"; presumably video_source was meant.
            self.det_thread.source = "0"
            # self.ai_thread.set_start_config(
                # ai_task=self.ai_task,
                # model_name=self.model_name,
                # tracker_name=self.tracker_name)
        
            # self.camera_thread.set_start_config(video_source=video_source)
            # self.display_thread.set_start_config([self.label_display.width(),self.label_display.height()])

            # self.camera_thread.send_frame.connect(self.display_thread.get_fresh_frame)
            # self.camera_thread.send_frame.connect(self.ai_thread.get_frame)
            # self.det_thread.send_ai_output.connect(self.display_thread.get_ai_output)
            # self.det_thread.send_img.connect(lambda x: self.show_image(x, self.label_display))
            # self.det_thread.send_result.connect(self.update_statistic_table)
            # self.display_thread.send_thread_start_stop_flag.connect(self.buttons_states)
            # print("det Starting")
            # self.det_thread.start()
            # self.display_thread.start()
            # self.camera_thread.start()

        
    def process_file(self):
        """Open a file dialog for an image/video and point det_thread at it."""
        img_fm = (".tif", ".tiff", ".jpg", ".jpeg", ".gif", ".png", ".eps", ".raw", ".cr2", ".nef", ".orf", ".sr2", ".bmp", ".ppm", ".heif")
        vid_fm = (".flv", ".avi", ".mp4", ".3gp", ".mov", ".webm", ".ogg", ".qt", ".avchd")
        file_list = " *".join(img_fm+vid_fm)
        file_name, _ = QtWidgets.QFileDialog.getOpenFileName(self, "choose an image or video file", "./data", f"Files({file_list})")
        if file_name:
            print(file_name)
            self.det_thread.source = file_name
            
            # self.file_process_thread.set_start_config(
            #     video_path=file_name,
            #     ai_task=self.ai_task,
            #     screen_size=[self.label_display.width(),self.label_display.height()],
            #     model_name=self.model_name,
            #     tracker_name=self.tracker_name,
            #     confidence_threshold=self.conf_thr,
            #     iou_threshold=self.iou_thr,
            #     frame_interval=self.frame_interval)
            # self.file_process_thread.send_ai_output.connect(self.update_statistic_table)
            # self.file_process_thread.send_display_frame.connect(self.update_display_frame)
            # self.file_process_thread.send_play_progress.connect(self.progressBar_play.setValue)
            # self.file_process_thread.send_thread_start_finish_flag.connect(self.buttons_states)
            # self.file_process_thread.start()

    def stop_video(self):
        """Signal the detection thread to stop (its run() then stops the timer).

        NOTE(review): stop_status is never reset to False anywhere, so playback
        cannot be restarted after a stop — confirm whether that is intended.
        """
        self.det_thread.stop_status = True
        
        # self.display_thread.stop_display()
        # self.ai_thread.stop_process()
        # self.camera_thread.stop_capture()
        # self.file_process_thread.stop_process()

    # def update_display_frame(self, showImage):
    #     self.label_display.setPixmap(QtGui.QPixmap.fromImage(showImage))
    
    def show_image(self, img_src, label):
        """Letterbox a BGR frame to fit *label*, preserving aspect ratio.

        NOTE(review): uses the plain `cv2` name rather than the `cv` alias
        imported at the top — presumably supplied by `from libdet import *`.
        """
        try:
            ih, iw, _ = img_src.shape
            w = label.geometry().width()  # display label width
            h = label.geometry().height()  # display label height
        
            # keep original aspect ratio
            if iw / ih > w / h:
                scal = w / iw
                nw = w
                nh = int(scal * ih)
            else:
                scal = h / ih
                nw = int(scal * iw)
                nh = h

            # Resize the image
            img_src_ = cv2.resize(img_src, (nw, nh))

            # Add black borders to fit the label size (w, h)
            top = (h - nh) // 2
            bottom = h - nh - top
            left = (w - nw) // 2
            right = w - nw - left

            # Add black border (value = 0 for black)
            img_src_ = cv2.copyMakeBorder(img_src_, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(0, 0, 0))

            # Convert the image to RGB format
            frame = cv2.cvtColor(img_src_, cv2.COLOR_BGR2RGB)
            img = QImage(frame.data, frame.shape[1], frame.shape[0], frame.shape[2] * frame.shape[1],
                         QImage.Format_RGB888)

            label.setPixmap(QPixmap.fromImage(img))

        except Exception as e:
            print(f"Error showing image: {e}")

    def clean_table(self):
        """Remove every row from the results table."""
        while (self.tableWidget_results.rowCount() > 0):
            self.tableWidget_results.removeRow(0)

    def update_statistic_table(self, ai_output):
        """Rebuild the results table from DetThread output ([text, fps])."""
        self.clean_table()
        self.tableWidget_results.setRowCount(0)
        # if ai_output == []:
            # return
        fps = ai_output[-1]
        ai_output = ai_output[0]
        for index, box in enumerate(ai_output):
            # Row layout: index, class/label (box[0]), fps, last field of box.
            index = "No." + str(index+1)
            each_item = [index, str(box[0]), str(fps), str(box[-1])]
            row = self.tableWidget_results.rowCount()
            self.tableWidget_results.insertRow(row)
            for j in range(len(each_item)):
                item = QtWidgets.QTableWidgetItem(str(each_item[j]))
                # int() cast needed: setTextAlignment expects an int flag value
                item.setTextAlignment(int(Qt.AlignVCenter | Qt.AlignHCenter))

                # item.setTextAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
                # item.setTextAlignment(Qt.AlignHCenter)
                self.tableWidget_results.setItem(row, j, item)

    def get_stream_source(self):
        """Ask the user for a camera ID or RTSP URL.

        Returns an int camera index, a str URL, or None if the dialog was
        cancelled.
        """
        video_source, okPressed = QtWidgets.QInputDialog.getText(self, "Input Camera_ID or RTSP", "Camera ID or RTSP")
        if okPressed:
            if video_source.isdigit():
                return int(video_source)
            else:
                return video_source
        else:
            return None

if __name__ == '__main__':
    # Bootstrap the Qt application and hand control to the event loop.
    qt_app = QtWidgets.QApplication(sys.argv)
    main_win = MainWindow()
    main_win.show()
    sys.exit(qt_app.exec_())



