import asyncio
import json
import logging
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack
import time
import fractions
from typing import Tuple, Dict, Optional, Set, Union
import av
from av.frame import Frame
from av.packet import Packet
from av import AudioFrame
import numpy as np
import cv2
import queue
import threading
import wave
import pyaudio

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global state: the set of active RTCPeerConnection objects (kept alive here
# so they are not garbage-collected while a session is running)
pcs = set()

# Constants
AUDIO_PTIME = 0.020  # 20ms audio packetization
VIDEO_CLOCK_RATE = 90000  # standard RTP video clock (90 kHz)
VIDEO_PTIME = 1 / 30  # 30fps
VIDEO_TIME_BASE = fractions.Fraction(1, VIDEO_CLOCK_RATE)
SAMPLE_RATE = 16000  # audio sample rate in Hz
AUDIO_TIME_BASE = fractions.Fraction(1, SAMPLE_RATE)

class PlayerStreamTrack(MediaStreamTrack):
    """
    A video or audio track whose frames are supplied by a HumanPlayer.

    Frames arrive on an internal asyncio queue (filled by the player's
    worker thread); ``recv`` paces them out in real time by computing
    monotonically increasing presentation timestamps.
    """

    def __init__(self, player, kind):
        super().__init__()  # don't forget this!
        self.kind = kind
        self._player = player
        self._queue = asyncio.Queue()
        self.timelist = []  # wall-clock time of the first packet (stream start)
        if self.kind == 'video':
            # bookkeeping for measuring the effective frame rate
            self.framecount = 0
            self.lasttime = time.perf_counter()
            self.totaltime = 0

    async def next_timestamp(self) -> Tuple[int, fractions.Fraction]:
        """
        Return ``(pts, time_base)`` for the next frame.

        Sleeps just long enough that frames are emitted at the nominal
        rate (30 fps video / 20 ms audio) relative to the stream start.

        Raises:
            Exception: if the track is no longer live.
        """
        if self.readyState != "live":
            raise Exception("PlayerStreamTrack is not live")

        # Video and audio differ only in their packet step and clock rate.
        if self.kind == 'video':
            step = int(VIDEO_PTIME * VIDEO_CLOCK_RATE)
            clock_rate, time_base = VIDEO_CLOCK_RATE, VIDEO_TIME_BASE
        else:  # audio
            step = int(AUDIO_PTIME * SAMPLE_RATE)
            clock_rate, time_base = SAMPLE_RATE, AUDIO_TIME_BASE

        if hasattr(self, "_timestamp"):
            self._timestamp += step
            # Sleep until this timestamp's wall-clock deadline.
            wait = self._start + (self._timestamp / clock_rate) - time.time()
            if wait > 0:
                await asyncio.sleep(wait)
        else:
            # First packet: anchor the stream to the current wall clock.
            self._start = time.time()
            self._timestamp = 0
            self.timelist.append(self._start)
        return self._timestamp, time_base

    async def recv(self) -> Union[Frame, Packet]:
        """
        Return the next timestamped frame from the player's queue.

        A ``None`` frame is the end-of-stream sentinel: the track is
        stopped and an exception is raised so aiortc tears down cleanly.
        """
        self._player._start(self)
        frame, eventpoint = await self._queue.get()
        if frame is None:
            self.stop()
            raise Exception("PlayerStreamTrack received end-of-stream")
        pts, time_base = await self.next_timestamp()
        frame.pts = pts
        frame.time_base = time_base
        if eventpoint:
            self._player.notify(eventpoint)
        if self.kind == 'video':
            # Accumulate inter-frame intervals for frame-rate statistics.
            self.totaltime += (time.perf_counter() - self.lasttime)
            self.framecount += 1
            self.lasttime = time.perf_counter()
        return frame

    def stop(self):
        """Stop the track and detach it from its player (idempotent)."""
        super().stop()
        if self._player is not None:
            self._player._stop(self)
            self._player = None

class HumanPlayer:
    """
    Bridges a lip-sync model's frame queue to an aiortc video track.

    A background worker thread pulls BGR numpy frames from
    ``lipModel.frame_queue`` (presumably a thread-safe ``queue.Queue`` —
    it is read with ``get(timeout=...)``/``queue.Empty``), converts them
    to ``av.VideoFrame`` objects and hands them to the video track's
    asyncio queue via the event loop, so ``recv`` can await them.

    Note: ``audio_Path``, ``format``, ``options``, ``timeout``, ``loop``
    and ``decode`` are accepted for interface compatibility but are
    currently unused.
    """

    def __init__(self, lipModel, audio_Path, format=None, options=None, timeout=None, loop=False, decode=True):
        self.__thread: Optional[threading.Thread] = None
        self.__thread_quit: Optional[threading.Event] = None
        # Event loop that owns the track queue; captured in _start().
        self.__loop: Optional[asyncio.AbstractEventLoop] = None

        # Tracks that have requested frames at least once.
        self.__started: Set[PlayerStreamTrack] = set()
        self.__audio: Optional[PlayerStreamTrack] = None  # audio not produced yet
        self.__video: Optional[PlayerStreamTrack] = PlayerStreamTrack(self, kind="video")

        self.__container = lipModel
        self.frame_queue = lipModel.frame_queue  # LipModel's outgoing frame queue

    def notify(self, eventpoint):
        """Forward an event point from a track back to the lip model."""
        self.__container.notify(eventpoint)

    @property
    def video(self) -> MediaStreamTrack:
        """
        A :class:`aiortc.MediaStreamTrack` instance for the video stream.
        """
        return self.__video

    def _start(self, track: PlayerStreamTrack) -> None:
        """Register *track* and lazily spawn the worker thread."""
        self.__started.add(track)
        if self.__thread is None:
            self.__log_debug("Starting worker thread")
            # Capture the loop we are called from (recv runs on the event
            # loop) so the worker thread can enqueue frames safely:
            # asyncio.Queue is NOT thread-safe, so the worker must go
            # through loop.call_soon_threadsafe.
            self.__loop = asyncio.get_event_loop()
            self.__thread_quit = threading.Event()
            self.__thread = threading.Thread(
                name="media-player",
                target=self.player_worker_thread,
                args=(
                    self.__thread_quit,
                    self.__video,
                ),
            )
            self.__thread.start()

    def _stop(self, track: PlayerStreamTrack) -> None:
        """Unregister *track*; stop the worker once no tracks remain."""
        self.__started.discard(track)

        if not self.__started and self.__thread is not None:
            self.__log_debug("Stopping worker thread")
            self.__thread_quit.set()
            self.__thread.join()
            self.__thread = None

        if not self.__started and self.__container is not None:
            self.__container = None

    def __log_debug(self, msg: str, *args) -> None:
        logger.debug(f"HumanPlayer {msg}", *args)

    def player_worker_thread(self, quit_event, video_track):
        """
        Worker thread: pull frames from the LipModel queue and feed them
        to the PlayerStreamTrack until *quit_event* is set or the model
        emits the ``None`` end-of-stream sentinel.
        """
        loop = self.__loop
        while not quit_event.is_set():
            try:
                # Short timeout so quit_event is polled regularly.
                frame = self.frame_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            if frame is None:
                # Forward the sentinel so a pending recv() can stop the
                # track instead of blocking forever.
                if video_track and loop is not None:
                    loop.call_soon_threadsafe(video_track._queue.put_nowait, (None, None))
                break
            # Convert BGR (OpenCV) to RGB before wrapping in an av frame.
            if isinstance(frame, np.ndarray) and frame.ndim == 3 and frame.shape[2] == 3:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Wrap plain numpy arrays as av.VideoFrame objects.
            if isinstance(frame, np.ndarray):
                frame = av.VideoFrame.from_ndarray(frame, format='rgb24')
            if video_track:
                if loop is not None:
                    # Hand the frame to the event loop thread-safely.
                    loop.call_soon_threadsafe(video_track._queue.put_nowait, (frame, None))
                else:
                    # Fallback (loop never captured): original direct put.
                    video_track._queue.put_nowait((frame, None))