import itertools
import queue
import sys
import threading
import traceback
from typing import Optional, Union
from aiortc import MediaStreamTrack, VideoStreamTrack

from common.log.logUtils import logUtils
from rtcCVTransform.videoTrans_Base import videoTransformBase
import numpy as np
import av

# Sentinel value placed on the worker queue to request that the
# background transform thread shut down (see AsyncVideoTransformTrack).
__SENTINEL__ = "__SENTINEL__"

from serversRtc.imageQueue import imageQueue

# MediaStreamTrack.kind value identifying a video track.
VIDEO_TAG = "video"


class VideoTransformTrack(MediaStreamTrack):
    """Media track that runs each upstream video frame through a transformer.

    Frames are pulled from ``track`` and transformed synchronously inside
    ``recv`` before being handed to the consumer.
    """

    kind = VIDEO_TAG

    def __init__(self, track: MediaStreamTrack, video_transform: videoTransformBase):
        super().__init__()
        self.track = track
        self.transformer = video_transform

    async def recv(self):
        """Return the next upstream frame after applying the transformer.

        The transformed ndarray is wrapped back into an ``av.VideoFrame``
        and stamped with the source frame's timing information.
        """
        src_frame = await self.track.recv()
        transformed = self.transformer.transform(src_frame)

        # Rebuild an av.VideoFrame and carry over the original
        # presentation timestamp and time base.
        out_frame = av.VideoFrame.from_ndarray(transformed, format="bgr24")
        out_frame.pts = src_frame.pts
        out_frame.time_base = src_frame.time_base
        return out_frame


# See https://stackoverflow.com/a/42007659
video_transform_thread_id_generator = itertools.count()


class monitorTransformTrack(VideoStreamTrack):
    """Video track that replays processed frames for a room from imageQueue."""

    kind = "video"

    def __init__(self, roomId: str):
        # VideoStreamTrack.__init__ must run so next_timestamp() works.
        super().__init__()
        self.roomId = roomId

    async def recv(self):
        """Emit the latest processed image for this room as a video frame.

        The frame is stamped with this track's own clock rather than the
        producer's timestamps.
        """
        pts, time_base = await self.next_timestamp()
        entry = imageQueue.getImgFromQueue(self.roomId)
        frame = av.VideoFrame.from_ndarray(entry["img"], format="bgr24")
        frame.pts = pts
        frame.time_base = time_base
        return frame


#
class AsyncVideoTransformTrack(MediaStreamTrack):
    """Video track that offloads frame transformation to a worker thread.

    ``recv`` never blocks on the (potentially slow) transformer: each
    incoming frame is queued for the worker thread, and the most recently
    completed result is returned immediately. Until the first result is
    ready, the raw upstream frame is passed through unchanged.
    """

    kind = "video"

    # Frames (and the shutdown sentinel) travelling to the worker thread.
    _in_queue: queue.Queue

    def __init__(
            self,
            track: MediaStreamTrack,
            video_transformer: videoTransformBase,
            stop_timeout: Optional[float] = None,
            roomId: Optional[str] = None
    ):
        """
        :param track: upstream track providing the raw frames.
        :param video_transformer: transformer applied to each frame in the
            worker thread.
        :param stop_timeout: maximum seconds :meth:`stop` waits for the
            worker thread to exit; ``None`` waits indefinitely.
        :param roomId: key under which processed frames are published to
            ``imageQueue``.
        """
        super().__init__()  # don't forget this!
        self.track = track
        self.transformer = video_transformer

        self._thread = threading.Thread(
            target=self._run_worker_thread,
            name=f"async_video_transformer_{next(video_transform_thread_id_generator)}",
        )
        self._in_queue = queue.Queue()
        # Protects _latest_result_img: written by the worker thread,
        # read by recv() on the event loop.
        self._latest_result_img_lock = threading.Lock()

        self._busy = False  # NOTE(review): never read in this file; kept for compatibility
        self._latest_result_img: Union[np.ndarray, None] = None

        self._thread.start()

        self.stop_timeout = stop_timeout
        self.roomId = roomId

    def _run_worker_thread(self):
        """Thread entry point: run the worker loop and log any crash."""
        try:
            self._worker_thread()
        except Exception:
            logUtils.error("Error occurred in the WebRTC thread:")
            # Emit the traceback line by line so multi-line entries stay
            # readable in the log output.
            for tbline in traceback.format_exc().rstrip().splitlines():
                logUtils.error(tbline.rstrip())

    def _worker_thread(self):
        """Consume frames from ``_in_queue`` until the sentinel arrives.

        Only the newest queued frame is transformed; any older frames
        that piled up are dropped so the worker never falls behind the
        live stream.
        """
        while True:
            item = self._in_queue.get()
            if item == __SENTINEL__:
                break

            # Drain the queue so we always transform the latest frame,
            # remembering whether a stop request was queued in between.
            stop_requested = False
            while not self._in_queue.empty():
                item = self._in_queue.get_nowait()
                if item == __SENTINEL__:
                    stop_requested = True
            if stop_requested:
                break

            if item is None:
                raise Exception("A queued item is unexpectedly None")

            result_img = self.transformer.transform(item)

            with self._latest_result_img_lock:
                self._latest_result_img = result_img

            # Publish the processed frame (with its timing info) for
            # consumers such as monitorTransformTrack. Use the local
            # result_img rather than re-reading the shared attribute
            # outside the lock.
            imageQueue.putImgToQueue(self.roomId,
                                     {
                                         "img": result_img,
                                         "pts": item.pts,
                                         "time_base": item.time_base
                                     })

    def stop(self):
        """Signal the worker thread to exit, wait for it, stop the track.

        ``join`` may return before the thread has actually finished if
        ``stop_timeout`` elapses first.
        """
        self._in_queue.put(__SENTINEL__)
        self._thread.join(self.stop_timeout)

        return super().stop()

    async def recv(self):
        """Queue the next upstream frame and return the latest result.

        Returns the most recently transformed image stamped with the
        current frame's pts/time_base, or the raw frame itself while no
        transformed result exists yet.
        """
        frame = await self.track.recv()
        self._in_queue.put(frame)

        with self._latest_result_img_lock:
            if self._latest_result_img is not None:
                # rebuild a av.VideoFrame, preserving timing information
                new_frame = av.VideoFrame.from_ndarray(
                    self._latest_result_img, format="bgr24"
                )
                new_frame.pts = frame.pts
                new_frame.time_base = frame.time_base
                return new_frame
            else:
                return frame
