import time
import traceback
from queue import Queue
from threading import Thread

import cv2


class FileVideoStream:
    """Threaded frame reader for a video file or camera.

    A daemon thread (started via :meth:`start`) decodes frames into a
    ring buffer of ``queueSize`` slots; :meth:`read` returns them one at
    a time, optionally throttled to ``fps_limit`` frames per second.

    The queue carries only slot *indices*; an index is published strictly
    after its slot has been written, so the consumer never observes a
    half-filled slot.
    """

    def __init__(self, path, fps_limit=0, queueSize=128, cap_mode='normal'):
        """Open the capture and pre-allocate the frame ring buffer.

        Args:
            path: Filename / device index passed to ``cv2.VideoCapture``.
            fps_limit: Max delivery rate for ``read()``.  ``0`` means
                "use the source FPS"; if the source reports 0 FPS too
                (common for live cameras), throttling is disabled.
            queueSize: Number of slots in the ring buffer.
            cap_mode: ``'UMat'`` to decode into pre-allocated ``cv2.UMat``
                buffers (GPU-friendly); anything else decodes into
                ordinary numpy arrays.
        """
        self.stream = cv2.VideoCapture(path)
        self.stopped = False
        self.count = 0  # total frames grabbed by the producer thread
        self.Q = Queue(maxsize=queueSize)
        # Fall back to the source FPS when the caller gives no limit.
        # CAP_PROP_FPS may legitimately be 0; read() treats a
        # non-positive limit as "no throttling" instead of dividing by 0.
        self.fps_limit = fps_limit if fps_limit else int(self.stream.get(cv2.CAP_PROP_FPS))
        self.fps_last_time = time.time()
        self.cap_mode = cap_mode
        self.width = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.total_frames = int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
        self.last_frame_index = 0     # slot index of the last frame handed out, +1
        self.current_frame_index = 0  # frames delivered to the caller so far

        # UMat mode retrieves into pre-allocated device buffers, so they
        # must exist up front; normal mode just needs placeholder slots.
        if self.cap_mode == 'UMat':
            self.frames = [cv2.UMat(self.height, self.width, cv2.CV_8UC3)
                           for _ in range(queueSize)]
        else:
            self.frames = [0] * queueSize

    def __del__(self):
        # Best-effort release of the underlying capture handle.
        self.stream.release()

    def start(self):
        """Start the background decode thread and return ``self``."""
        t = Thread(target=self.update, args=())
        t.daemon = True  # don't keep the process alive for the reader
        t.start()
        return self

    def update(self):
        """Producer loop: decode frames and publish their slot indices.

        Runs in the background thread until the stream ends, ``stop()``
        is called, or the capture fails.
        """
        while True:
            try:
                if self.stopped:
                    return

                self.count += 1
                target = (self.count - 1) % self.Q.maxsize
                if self.cap_mode == 'UMat':
                    # grab()/retrieve() split lets us decode straight
                    # into the pre-allocated UMat slot.
                    if not self.stream.grab():
                        # `grabbed` False => end of the video file.
                        self.stop()
                        return
                    self.stream.retrieve(self.frames[target])
                else:
                    is_success, frame = self.stream.read()
                    if not is_success:
                        self.frames[target] = None
                        self.stop()
                        return
                    self.frames[target] = frame
                # Publish ONLY after the slot is fully written.  (The
                # previous version also put the index *before* writing
                # and made read() pop twice to compensate; if the stream
                # ended between the two puts, read() deadlocked waiting
                # for a second index that never arrived.)
                self.Q.put(target)
            except Exception:
                traceback.print_exc()

    def read(self):
        """Return the next frame, or ``None`` once the stream has ended.

        Blocks (yielding the CPU) until a frame is available, and sleeps
        as needed to honour ``fps_limit`` when it is positive.
        """
        if self.stopped and self.Q.qsize() == 0:
            return None

        # Wait for the producer; sleep(0) yields without a fixed delay.
        while not self.more() and not self.stopped:
            time.sleep(0)
        # The stream may have stopped while we waited, with nothing queued.
        if self.stopped and self.Q.qsize() == 0:
            return None

        target = self.Q.get()

        # Throttle delivery; a non-positive limit disables throttling
        # (and avoids dividing by zero for cameras reporting 0 FPS).
        if self.fps_limit > 0:
            diff_time = time.time() - self.fps_last_time
            if diff_time < (1 / self.fps_limit):
                time.sleep(round((1 / self.fps_limit) - diff_time, 3))
        self.fps_last_time = time.time()

        frame = self.frames[target]
        self.last_frame_index = target + 1
        self.current_frame_index += 1
        return frame

    def more(self):
        """Return True if at least one frame index is queued."""
        return self.Q.qsize() > 0

    def stop(self):
        """Signal both threads that the stream is finished."""
        print('视频流停止')
        self.stopped = True

    def isOpened(self):
        """Proxy for ``cv2.VideoCapture.isOpened``."""
        return self.stream.isOpened()

    def release(self):
        """Proxy for ``cv2.VideoCapture.release``."""
        return self.stream.release()