"""
该脚本实现stream模块的处理和播放的warp
"""

import dataclasses
import gc
import threading
import time
import wave
from asyncio import Queue as AsyncQueue
from collections import deque
from concurrent.futures import Executor
from enum import Enum
from pathlib import Path
from queue import Queue
from typing import Any, List, Literal, Optional, Tuple

import cv2
import numpy as np
import torch
import tqdm

from utils.stream.base import BaseStream

# %% Default parameters
BUFFER_TIME_SEC = 1         # default buffering length, in seconds of content
FPS = 25                    # video frame rate
SAMEPLE_RATE = 16_000       # audio sample rate (misspelled name kept for existing callers)
SAMPLE_RATE = SAMEPLE_RATE  # correctly-spelled alias; prefer this in new code
SAMPLE_WIDTH = 2            # bytes per sample: 16-bit PCM = 2 bytes
CHANNELS = 2                # number of audio channels

# %% Data type definitions
@dataclasses.dataclass(frozen=True)
class FrameData:
  """A single timestamped media frame: raw PCM audio bytes and/or a BGR image."""
  timestamp_ms: int                 # presentation timestamp in milliseconds
  wave        : Optional[bytes] = None   # raw PCM audio bytes for this frame
  image_bgr   : Optional[Any]   = None   # BGR image for this frame (None when audio-only)

@dataclasses.dataclass(frozen=True)
class ChunkData:
  """A model-facing chunk assembled from several consecutive FrameData items."""
  timestamp_ms: int                  # timestamp of the chunk's anchor frame (ms)
  wave_out    : Optional[bytes] = None  # audio to emit; may lag the newest frame because some models need longer temporal context
  waves_np    : Optional[Any]   = None  # merged time-domain audio of the whole window
  image_out   : Optional[Any]   = None  # image to emit, same lag rationale as wave_out (needs paste-back)
  images_rgb  : Optional[Any]   = None  # batched RGB image data [B, H, W, C]

class AudioType(Enum):
  """Symbolic payload kinds mapped to their concrete Python types.

  Used to declare a stream's input/output type up front; the underlying type
  can also be passed directly when instantiating a stream.
  """
  UNKOWN        = object     # any payload (misspelled name kept for existing callers)
  UNKNOWN       = object     # correctly-spelled alias of UNKOWN (same value -> enum alias)
  BYTES         = bytes
  FRAME_DATA    = FrameData
  CHUNK_DATA    = ChunkData

# %% Stream implementations
class WaveFileReader(BaseStream):
  """Source stream that reads PCM bytes from a WAV file in fixed-duration slices.

  Each call returns the raw bytes for ``sample_interval_ms`` worth of audio;
  the interval should not exceed the duration of a single video frame.
  """

  def __init__(self,
               aud_file: str,
               sample_interval_ms: int = 1000/FPS,  # sampling interval at fps=25
               **kwargs):
    """
    Args:
      aud_file: path to the WAV file to read.
      sample_interval_ms: audio duration returned per read; must correspond
        to a whole number of sample points (asserted below).
    """
    self.aud_file = aud_file
    self.handler = wave.open(aud_file, 'rb')
    self.sr = self.handler.getframerate()
    self.sample_interval_ms = sample_interval_ms
    # The number of sample points per read must be an integer.
    assert (self.sr * sample_interval_ms) % 1000 == 0
    self.point_per_sample = int(self.sr * sample_interval_ms / 1000)
    print("self.point_per_sample = ", self.point_per_sample, "self.sr = ", self.sr, "sample_interval_ms = ", sample_interval_ms)
    # Buffer BUFFER_TIME_SEC seconds of audio: sr * seconds * bytes-per-sample * channels.
    kwargs['max_queue_size'] = int(self.sr * BUFFER_TIME_SEC * SAMPLE_WIDTH * CHANNELS)
    super().__init__(AudioType.UNKOWN.value, AudioType.BYTES.value, **kwargs)

  def stop(self):
    """Close the WAV handle, then stop the underlying stream."""
    self.handler.close()
    super().stop()

  def __call__(self, idx=None):
    """Read the next slice of PCM bytes.

    Args:
      idx: optional absolute sample position to seek to before reading.

    Raises:
      EOFError: once a previous call has drained and closed the file.
    """
    if idx is not None:
      self.handler.setpos(idx)
    if self.handler.getfp() is None:
      # Handle was closed by a previous call that exhausted the file.
      # (The original code had an unreachable `return None` after this raise.)
      raise EOFError("File has been read completely.")
    output = self.handler.readframes(self.point_per_sample)
    if not output:
      # End of file: close now so the NEXT call raises EOFError.
      self.handler.close()
    return output
    
class AudioBytes2Format(BaseStream):
  """Wrap raw PCM byte chunks into timestamped FrameData objects.

  Maintains a running clock: every incoming chunk advances the timestamp by
  its own duration, derived from sample rate, sample width and channel count.
  """

  def __init__(self,
               sr: int = SAMEPLE_RATE,
               sample_width: int = SAMPLE_WIDTH,
               channels: int = CHANNELS,
               **kwargs
               ):
    self.sr = sr
    self.sample_width = sample_width
    self.channels = channels
    self.timestamp_ms = 0  # running presentation clock, in milliseconds
    super().__init__(bytes, FrameData, **kwargs)

  def __call__(self, data):
    """Advance the clock by this chunk's duration and emit a FrameData."""
    if data is None:
      raise ValueError("Audio data is None.")
    bytes_per_point = self.sample_width * self.channels
    n_points = len(data) // bytes_per_point      # sample points in this chunk
    self.timestamp_ms += (n_points / self.sr) * 1000  # chunk duration in ms
    return FrameData(int(self.timestamp_ms), data)
    
class CombineImage(BaseStream):
  """Attach a looping video frame to each incoming audio FrameData.

  The video position is slaved to the audio timestamp: the stream skips ahead
  when audio runs ahead, and emits nothing while the video is ahead.
  """

  def __init__(self,
               vid_file: str,
               **kwargs
               ):
    self.vid_file = vid_file
    self.cap = cv2.VideoCapture(vid_file)
    self.fps = self.cap.get(cv2.CAP_PROP_FPS)
    self.total_frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
    super().__init__(FrameData, FrameData, **kwargs)

  def stop(self):
    """Release the video capture, then stop the underlying stream."""
    self.cap.release()
    super().stop()

  def __call__(self, data: FrameData):
    """Return the audio frame with a video image attached, or None to wait."""
    timestamp_ms = data.timestamp_ms
    wave = data.wave
    # Loop playback by wrapping the target position around the frame count
    # (mirror the video beforehand if a seamless loop is needed).
    pos_target = (timestamp_ms * self.fps / 1000) % self.total_frames
    diff = pos_target - self.cap.get(cv2.CAP_PROP_POS_FRAMES)
    if diff < 0:
      # Video is ahead of the audio clock: emit nothing and wait for audio.
      return None
    elif diff > 1:
      # Audio is far ahead of video: jump straight to the target frame.
      self.cap.set(cv2.CAP_PROP_POS_FRAMES, int(pos_target))
    # Within one frame of the target: read sequentially.
    ret, frame = self.cap.read()
    if not ret:
      # End of video: rewind AND re-read. The original only rewound, which
      # emitted a FrameData whose image was None for that call.
      self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
      ret, frame = self.cap.read()
    return FrameData(timestamp_ms, wave, frame)

class Frame2Chunk(BaseStream):
  """Collect a sliding window of FrameData and emit a ChunkData on stride boundaries."""

  def __init__(self,
               stride_interval_ms: int = 40,  # stride interval
               snapshot_size: int = 5,        # frames kept in the window, i.e. chunk length
               **kwargs
               ):
    self.snapshot_size = snapshot_size
    self.dq: deque[FrameData] = deque(maxlen=snapshot_size)  # sliding chunk window
    self.stride_interval_ms = stride_interval_ms
    super().__init__(AudioType.FRAME_DATA.value, AudioType.CHUNK_DATA.value, **kwargs)

  def __call__(self, data: FrameData) -> ChunkData:
    """Append the frame; on a stride boundary build and return a chunk, else None."""
    now_ms = data.timestamp_ms
    self.dq.append(data)
    if now_ms % self.stride_interval_ms != 0:
      return None
    # Normalize 16-bit PCM (sample width 2) to float32.
    wave_parts = [
      (np.frombuffer(f.wave, dtype=np.int16) / 32768).astype(np.float32)
      for f in self.dq
    ]
    # Convert every available image to RGB for the model.
    rgb_parts = [
      cv2.cvtColor(f.image_bgr, cv2.COLOR_BGR2RGB)
      for f in self.dq if f.image_bgr is not None
    ]
    # Anchor on the window center; fall back to the last frame while filling up.
    anchor = self.dq[min(self.snapshot_size // 2, len(self.dq) - 1)]
    batched = np.stack(rgb_parts, axis=0) if rgb_parts else None  # [B, H, W, C]
    return ChunkData(anchor.timestamp_ms,
                     anchor.wave,
                     np.concatenate(wave_parts, axis=0),
                     anchor.image_bgr,
                     batched,
                     )

class Chunk2Out(BaseStream):
  """Pass-through used when no model is attached: unwrap a ChunkData into a FrameData."""

  def __call__(self, data: ChunkData) -> FrameData:
    return FrameData(data.timestamp_ms, data.wave_out, data.image_out)
  
class BarrierPlayer(BaseStream):
  """Sink stream that renders video via cv2 and audio via PyAudio.

  A frame thread and an audio thread each consume from their own queue and
  rendezvous on a two-party barrier so neither side runs ahead of the other.

  NOTE(review): as shipped, audio is written from the frame thread and the
  audio queue is never fed (see the commented-out put below), so the audio
  thread never reaches the barrier — confirm the intended wiring.
  """

  def __init__(self,
               fps: int = FPS,
               timeout: float = 1.0,
               sr: int = SAMEPLE_RATE,
               sample_width: int = SAMPLE_WIDTH,
               channels: int = CHANNELS,
               window_size: Tuple[int, int] = None,
               **kwargs
               ):
    import pyaudio  # imported lazily: not importable on some Linux setups
    self.audio_player = pyaudio.PyAudio()
    self.sr = sr
    self.stream = self.audio_player.open(
                    format = self.audio_player.get_format_from_width(sample_width),
                    channels = channels,
                    rate = sr,
                    output = True
                )
    self.window_name = "Video Player"
    # Playback must happen on single dedicated threads.
    self.play_thread = threading.Thread(target=self._play_loop, daemon=True)
    self.audio_thread = threading.Thread(target=self._play_audio, daemon=True)
    self.frame_interval = 1.0 / fps
    self.fps = fps
    self.fidx = 0      # frames rendered so far
    self.aidx = 0      # audio chunks played so far
    self.fast_idx = 0  # highest index reached by either thread
    self.timeout = timeout
    self.buffer_size = int(fps * timeout)
    self.window_size = window_size
    # Buffer at most `timeout` worth of data (x20 headroom on both queues).
    self.frame_buffer: Queue[FrameData] = Queue(maxsize=self.buffer_size * 20)
    self.audio_buffer: Queue[bytes] = Queue(maxsize=self.buffer_size * 20)
    # FIX: create the barrier BEFORE starting the threads. Both worker loops
    # call sync_index(), which dereferences self.barrier; starting them first
    # raced against this assignment (AttributeError on a fast scheduler).
    self.barrier = threading.Barrier(2)
    self.play_thread.start()
    self.audio_thread.start()
    super().__init__(FrameData, type(None), **kwargs)

  def sync_index(self, index):
    """Block the leading thread on the barrier until the other catches up."""
    self.fast_idx = max(index, self.fast_idx)
    if index == self.fast_idx:
      self.barrier.wait()

  def get_buffer_count(self):
    """Number of video frames currently buffered."""
    return self.frame_buffer.qsize()

  def _play_audio(self):
    """Audio thread: pull PCM chunks and write them to the PyAudio stream."""
    while True:
      wave = self.audio_buffer.get(block=True, timeout=self.timeout)  # slot freed once played
      self.stream.write(wave)
      self.aidx += 1
      self.sync_index(self.aidx)
      print(f"AUDIO INDEX: {self.aidx}")

  def _play_loop(self):
    """Frame thread: wait for a filled buffer, then render paced by timestamps."""
    # Wait until the buffer holds at least `timeout` seconds of frames.
    while True:
      if self.frame_buffer.qsize() > (self.fps * self.timeout):
        break
      else:
        print(f"Waiting for data...{self.buffer_size - self.frame_buffer.qsize()}")
        time.sleep(0.1)

    start_time = time.time()
    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)
    while self.is_working:
      try:
        data = self.frame_buffer.get(block=True, timeout=self.timeout)  # slot freed once played
        timestamp_ms = data.timestamp_ms
        wave = data.wave
        frame = data.image_bgr
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Display
        if self.window_size is not None:
          frame = cv2.resize(frame, self.window_size)  # resize to the requested window size
        cv2.imshow(self.window_name, frame)
        # self.audio_buffer.put(wave, block=False, timeout=self.timeout)
        self.stream.write(wave)
        # Frame-pacing bookkeeping.
        current_time = time.time()
        self.fidx += 1
        self.sync_index(self.fidx)
        print(f"FRAME INDEX: {self.fidx}, FPS: {self.fidx / (current_time - start_time)}")
        # print(f"time: {timestamp_ms/1000}, FPS: {self.idx / (current_time - start_time)}, QSize: {self.frame_buffer.qsize()}")

        # Sleep until this frame's presentation time.
        elapsed = start_time + timestamp_ms / 1000 - current_time
        if elapsed > 0:
            print("ELAPSED:", elapsed)
            time.sleep(elapsed)
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()
      except Exception as e:
        cv2.destroyWindow(self.window_name)
        raise e
    self.stop()
    self.get_top_stream().stop()
    cv2.destroyWindow(self.window_name)

  def __call__(self, data: FrameData):
    """Enqueue a frame for playback; blocks while the playback buffer is full."""
    if self._is_working:
      self.frame_buffer.put(data, block=True, timeout=self.timeout)
    return
  
class PasteOn(BaseStream):
  """Paste each incoming frame's image into a fixed bbox of a backing video.

  Optionally builds (once) and plays a "boomerang" copy of the backing video
  (forward then backward) so looping has no visible seam.
  """

  def __init__(self,
               video_file: str,
               bbox: list[int],  # (x0, y0, width, height)
               mirror: bool = True,
               **kwargs,
               ):
    video_file: Path = Path(video_file)
    if not video_file.exists():
      raise FileNotFoundError(f"Video file {video_file} not found.")
    if mirror:
      video_file_reversed = video_file.parent / (video_file.stem + "_mirror" + video_file.suffix)
      if not video_file_reversed.exists():
        print("Generating reversed video...")
        self.generate_reversed_video(video_file, video_file_reversed)
      video_file = video_file_reversed
    # cv2.VideoCapture expects a string path, not a Path object.
    self.cap = cv2.VideoCapture(str(video_file))
    self.bbox = bbox
    self.fps = self.cap.get(cv2.CAP_PROP_FPS)
    super().__init__(FrameData, FrameData, **kwargs)

  def generate_reversed_video(self, video_file: Path, video_file_mirror: Path):
    """Write video_file followed by its frame-reversed copy to video_file_mirror."""
    cap = cv2.VideoCapture(str(video_file))
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(str(video_file_mirror), fourcc,
                          cap.get(cv2.CAP_PROP_FPS),
                          (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Forward pass.
    for idx in tqdm.tqdm(range(frame_count)):
      ret, frame = cap.read()
      if not ret:
        break  # container metadata can over-report the frame count
      out.write(frame)
    # Backward pass. Frame positions are 0-based, so valid indices are
    # frame_count-1 .. 0; the original range(frame_count, 0, -1) sought one
    # past the end (failed read wrote None) and never wrote frame 0.
    for idx in tqdm.tqdm(range(frame_count - 1, -1, -1)):
      cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
      ret, frame = cap.read()
      if ret:
        out.write(frame)
    cap.release()
    out.release()

  def __call__(self, data: FrameData) -> FrameData:
    """Paste the frame image into the bbox of the next backing-video frame."""
    if data.image_bgr is None or data.wave is None:
      raise ValueError("FrameData must have image and wave.")
    success, frame = self.cap.read()
    if not success:
      # Loop the backing video.
      self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
      _, frame = self.cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    org_img = data.image_bgr
    x0, y0, w, h = self.bbox
    resized_img: np.ndarray = cv2.resize(org_img, (w, h))
    # Float images in [0, 1] are rescaled to uint8 before pasting.
    if np.max(resized_img) < 2.:
      resized_img = (resized_img * 255).astype(np.uint8)
    # Resize first, then paste into the bbox region.
    frame[y0:y0+h, x0:x0+w] = resized_img
    return FrameData(data.timestamp_ms, data.wave, frame)


class MutePlayer(BaseStream):
  """Video-only sink: renders buffered frames with cv2, paced by their
  timestamps; the audio bytes in each FrameData are ignored."""

  def __init__(self,
               fps: int = FPS,
               timeout: float = 5.0,
               window_size: Tuple[int, int] = None,
               **kwargs
               ):

    self.window_name = "Video Player"
    self.play_thread = threading.Thread(target=self._play_loop, daemon=True) # playback must run on a single dedicated thread
    self.frame_interval = 1.0 / fps
    self.fps = fps
    self.idx = 0  # frames rendered so far
    self.timeout = timeout
    self.window_size = window_size
    self.buffer_size = int(fps * timeout)
    self.frame_buffer: Queue[FrameData] = Queue(maxsize=self.buffer_size) # buffer at most `timeout` seconds of frames
    self.play_thread.start()
    super().__init__(FrameData, type(None), **kwargs)

  def _play_loop(self):
    """Wait for a full buffer, then display frames, sleeping to hit each timestamp."""
    # Block until the frame buffer is full before starting playback.
    while True:
      if self.frame_buffer.full():
        break
      else:
        # print(f"Waiting for data...{self.buffer_size - self.frame_buffer.qsize()}")
        time.sleep(0.1)

    start_time = time.time()
    cv2.namedWindow(self.window_name, cv2.WINDOW_AUTOSIZE)

    while self.is_working:
      try:
        data = self.frame_buffer.get(block=True, timeout=self.timeout) # slot is freed once the frame is consumed
        timestamp_ms = data.timestamp_ms
        wave = data.wave
        frame = data.image_bgr
        if isinstance(frame, torch.Tensor):
          frame = frame.data.cpu().numpy()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Display
        if not self.window_size is None:
          frame = cv2.resize(frame, self.window_size) # resize to the requested window size
        cv2.imshow(self.window_name, frame)

        # Frame pacing: sleep until this frame's presentation time.
        current_time = time.time()
        self.idx += 1
        # print(f"time: {timestamp_ms/1000}, FPS: {self.idx / (current_time - start_time)}")
        elapsed = start_time + timestamp_ms / 1000 - current_time
        if elapsed > 0:
          time.sleep(elapsed)

        # Handle quit key.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            self.stop()
      except Exception as e:
        cv2.destroyWindow(self.window_name)
        raise e
    self.stop()
    self.get_top_stream().stop()
    cv2.destroyWindow(self.window_name)

  def __call__(self, data: FrameData):
    """Enqueue a frame for playback; blocks while the buffer is full."""
    if self._is_working:
      self.frame_buffer.put(data, block=True, timeout=self.timeout) # blocks while the playback buffer is full
    return


if __name__ == '__main__':
  import asyncio
  async def main():
    # Demo pipeline:
    # wav reader -> framer -> video combiner -> chunker -> passthrough -> paste-back -> player
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=4) as executor:
      stream1 = WaveFileReader("aud.wav", executor=executor)
      stream2 = AudioBytes2Format(executor=executor)
      stream3 = CombineImage("test.mp4", executor=executor)
      stream4 = Frame2Chunk(executor=executor)
      # NOTE(review): the positional type args are presumably forwarded to
      # BaseStream.__init__ — confirm against BaseStream's signature.
      stream5 = Chunk2Out(ChunkData, FrameData, executor=executor)
      stream6 = PasteOn("anchorwoman.mp4", (277, 636, 508, 508), executor=executor)
      stream7 = BarrierPlayer(executor=executor)

      # Chain the stages and drive the source asynchronously.
      stream1.link_to(stream2).link_to(stream3).link_to(stream4).link_to(stream5).link_to(stream6).link_to(stream7)
      await stream1.async_start()

  asyncio.run(main())