from utils.stream.base import BaseStream
from utils.stream.play import ChunkData, FrameData, FPS
from utils.mel_torch import melspectrogram as melspectrogram_torch
from utils.stream.play import WaveFileReader, AudioBytes2Format, PasteOn, Frame2Chunk, MutePlayer, BarrierPlayer
from scene.gaussian_model import GaussianModel
from scene.networks import AudioEncoder
from scene.utils import melspectrogram
from scene.dataset import FourDGSdataset, Camera, LightCamera
from gaussian_renderer import render_from_batch_infer as render_from_batch
from collections import deque
from typing import Any, Union, Literal, NamedTuple
from threading import Thread, Lock
from multiprocessing import Process, Queue as MPQueue
from queue import Queue
from torch import nn
from torch.nn import functional as F
import numpy as np, dataclasses
import torch, librosa, cv2, time
import asyncio

# %% dataclass
@dataclasses.dataclass(frozen=True)
class ChunkFeature:
  """Immutable feature bundle for one audio chunk flowing through the pipeline."""
  timestamp_ms: int                                  # chunk timestamp in milliseconds
  wave_out    : Union[bytes, None]        = None     # audio bytes to emit downstream
  image_out   : Union[np.ndarray, None]   = None     # image to emit downstream
  ave_t       : Union[torch.Tensor, None] = None     # audio-encoder features with batch dim (B*1*512 per Chunk2Avefeat)
  images_rgb  : Union[np.ndarray, None]   = None     # batched RGB image data
  # debug-only fields
  mel_t       : Union[torch.Tensor, None] = None     # mel patches fed to the audio encoder
  waves_np    : Union[np.ndarray, None]   = None     # raw waveform samples
  
  
@dataclasses.dataclass(frozen=True)
class ChunkCameras:
  """A batch of per-frame cameras with matching timestamps and audio bytes."""
  times   : list[int]                        # per-frame timestamps (ms)
  cameras : list[LightCamera]                # cameras carrying per-frame aud_f features
  waves   : Union[list[bytes], None] = None  # per-frame audio bytes to emit downstream

# %% functions
def gaussian_blur_masked(image, edge, kernel_size=15, sigma=5):
    """Blend ``image`` with a Gaussian-blurred copy of itself, weighted by ``edge``.

    Args:
        image: tensor in (B, C, H, W) layout — assumed from ``image.size(1)`` usage
            at the call sites; TODO confirm with callers.
        edge: blend mask broadcastable to ``image``; 1 selects the blurred pixel,
            0 keeps the original.
        kernel_size: Gaussian kernel size passed to torchvision (odd integer).
        sigma: Gaussian standard deviation.

    Returns:
        ``image * (1 - edge) + blur(image) * edge`` — same shape as ``image``.
    """
    # Local import keeps torchvision optional for modules that never call this.
    from torchvision.transforms.functional import gaussian_blur
    # BUGFIX: the original built a per-call `kernel` tensor (and moved it to the
    # GPU) that was never used — dead allocation on every frame; removed.
    blurred = gaussian_blur(image, kernel_size, sigma)
    # Blend: keep the original where edge==0, use the blurred copy where edge==1.
    return image * (1 - edge) + blurred * edge

# %% streamer
class Chunk2Avefeat(BaseStream):
  """Stream stage: ChunkData -> ChunkFeature.

  Converts a chunk's waveform to a mel spectrogram on the GPU, folds it into
  sliding (80, 16) patches centered on the chunk midpoint, and runs the audio
  encoder to get 512-d features, exponentially smoothed across chunks.
  """
  def __init__(self,
               model_path: str = 'scene/checkpoints/audio_visual_encoder.pth',
               device: Union[Literal['cuda', 'cpu'], torch.device]= 'cuda',
               **kwargs
               ):
    self.device = device
    model = AudioEncoder().to(device).eval()
    # map_location keeps loading robust when the checkpoint was saved on a
    # different device than the one requested (e.g. cuda ckpt on a cpu host).
    ckpt = torch.load(model_path, map_location=device)
    # checkpoint keys lack the 'audio_encoder.' prefix the model expects
    model.load_state_dict({f'audio_encoder.{k}': v for k, v in ckpt.items()})
    self.audio_encoder = model
    self.smooth_coef = 0.5  # EMA coefficient for temporal smoothing of features
    self.ave_cache = torch.zeros(8, 512, device=device)  # previous smoothed features
    self.t1 = time.time()  # debug timing (used by the commented print below)
    super().__init__(ChunkData, ChunkFeature, **kwargs)

  def fold(self, mel: torch.Tensor):
    """Slide an (80, 16) window over ``mel`` (80 x T) with stride 1.

    Returns a tensor of shape (n, 80, 16), one patch per output frame.
    """
    kernel_size = (80, 16)
    stride = (1, 1)
    patches = F.unfold(mel.unsqueeze(0).unsqueeze(0), kernel_size=kernel_size, stride=stride)
    mel_fold = patches[0].reshape(*kernel_size, -1).permute(2, 0, 1) # n * 80 * 16
    return mel_fold

  def __call__(self, data: ChunkData) -> ChunkFeature:
    """Produce smoothed audio-encoder features for one chunk."""
    # mel = melspectrogram(data.waves_np).T # CPU-bound; replaced by the GPU version below
    mel = melspectrogram_torch(data.waves_np, self.device) # 80 * len
    mlen = mel.shape[1]
    n = 24 # minimum mel length required; see file://./scene/utils.py#446
    if mlen < n:  # CONSISTENCY FIX: compare against the named constant, not a magic 24
      print(f"{mlen}: need padding")
      # pad symmetrically along the time axis up to n columns
      pad_pre = int(np.ceil((n - mlen)/2))
      pad_lst = int(np.floor((n - mlen)/2))
      padding_tuple = (pad_pre, pad_lst, 0, 0)
      mel = F.pad(mel, padding_tuple, mode='constant')
      mlen = mel.shape[1] # refresh the length after padding
    # centered 23-column slice so the windows stay anchored on the chunk midpoint
    slicer = slice(mlen//2 - 12, mlen//2 + 11)
    mel_t = self.fold(mel[:, slicer]).unsqueeze(1) # n * 1 * 80 * 16
    with torch.no_grad():
      ave_t: torch.Tensor = self.audio_encoder.forward(mel_t.to(self.device)) # BCHW -> n*512
      # exponential moving average against the previous chunk's features
      ave_t = self.ave_cache = self.ave_cache * self.smooth_coef + ave_t * (1 - self.smooth_coef)

    passtime = time.time() - self.t1  # debug timing
    # print(f"module: {self.__class__.__name__}, time {passtime}, fps:{self.batch_size/passtime}")
    self.t1 = time.time()
    return ChunkFeature(data.timestamp_ms,
                        data.wave_out,
                        data.image_out,
                        ave_t.unsqueeze(1), # B*1*512
                        data.images_rgb,
                        mel_t,
                        data.waves_np,
                        )
    
class Avefeat2ModelIn(BaseStream):
  """Stream stage: ChunkFeature -> ChunkCameras.

  Attaches each chunk's audio feature to the next dataset camera and queues
  (timestamp, camera, wave) triples; a background "porter" thread flushes a
  ChunkCameras batch when the queue is full or the time budget elapses.
  """
  def __init__(self,
               dataset: FourDGSdataset, # from scene.getCameras() TODO: may need a getStreamCameras()
               device: Union[Literal['cuda', 'cpu'], torch.device]= 'cuda',
               lazy_cache_size: int = 25, # lazy loading is slow, so cameras are preloaded ahead of use
               batch_size: int = 16,
               interval_per_frame_ms: float = 1000/FPS, # 1000/fps (note: value is a float)
               **kwargs
               ):
    self.device = device
    self._is_working = True
    self.idx = 0 # running index into the dataset
    self.idx_cache = 0 # lazy-load cursor, points at idx + lazy_cache_size
    dataset.light_mode = True
    dataset.lazy_load = True
    self.dataset = dataset
    self.last_camera = dataset[0]() # fallback camera when the preload cache is empty
    super().__init__(ChunkFeature, ChunkCameras, **kwargs)
    self.last_time = time.time()
    if dataset.lazy_load:
      self.preload_cameras: deque[Camera] = deque(maxlen=lazy_cache_size)
      self.preload_thread = self._executor.submit(self.preload) # preload worker
    self.batch_queue = Queue(maxsize=batch_size)
    self.batch_size = batch_size
    self.interval_ms = interval_per_frame_ms * batch_size # per-batch time budget
    self.porter_thread = Thread(target=self.porter, daemon=True) # batch-flush worker
    self.porter_thread.start()

  def stop(self):
    """Stop the stream and wait for the porter thread to exit."""
    super().stop()
    self.porter_thread.join()
    print("porter thread joined")

  def preload(self):
    """Eagerly materialize lazy cameras into the preload deque until exhausted."""
    while self.is_working:
      if len(self.preload_cameras) < self.preload_cameras.maxlen:
        if self.idx_cache >= len(self.dataset):
          break # dataset exhausted; stop preloading
        camera_lazy = self.dataset[self.idx_cache]
        camera:Camera = camera_lazy() # materialize (the expensive step)
        self.preload_cameras.append(camera)
        self.idx_cache += 1
      else:
        time.sleep(0.01) # cache full, wait

  def porter(self):
    """Drain batch_queue into a ChunkCameras batch when full or timed out."""
    while self.is_working:
      full_active = self.batch_queue.full()
      time_active = (time.time() - self.last_time) > self.interval_ms/1000
      if full_active or time_active:
        waves, cameras, times = [], [], []
        while self.batch_queue.qsize() > 0:
          time_ms, camera, wave = self.batch_queue.get()
          waves.append(wave)
          times.append(time_ms)
          cameras.append(camera)
        if times: # don't emit an empty batch on a pure timeout
          self.put(ChunkCameras(times, cameras, waves))
        # BUGFIX: reset the timer only after a flush attempt. Previously this
        # reset ran on every loop iteration (outside the if/else), so the
        # timeout condition could never fire and batches only flushed when full.
        self.last_time = time.time()
      else:
        time.sleep(0.01) # wait

  def __call__(self, data: ChunkFeature) -> ChunkCameras:
    """Pair one chunk's feature with a camera and enqueue it for batching."""
    # if condition_pause: # returning here relieves batch_queue pressure and keeps the cuda stream stable
    #   return
    if self.dataset.lazy_load:
      if len(self.preload_cameras) == 0:
        if self.last_camera is None:
          return # nothing to reuse yet
        camera = self.last_camera # preload cache empty: reuse the last camera
      else:
        camera = self.preload_cameras.popleft() # take from the left end
    else:
      camera:Camera = self.dataset[self.idx] # direct (non-lazy) access
    camera.aud_f = data.ave_t # overwrite aud_f with this chunk's audio feature
    self.last_camera = camera
    self.batch_queue.put((data.timestamp_ms, camera, data.wave_out))
  
class Model2Output(BaseStream):
  """Stream stage: ChunkCameras -> FrameData.

  Renders a batch of cameras with the Gaussian model, then composites each
  rendered frame over the camera's ground-truth image with an edge-softened
  alpha mask.
  """
  def __init__(self,
               gaussian: GaussianModel,
               erode_size: int=9,
               device: Union[Literal['cuda', 'cpu'], torch.device]= 'cuda',
               **kwargs
               ):
    self.gaussian = gaussian
    gaussian._deformation.eval()
    pooling = nn.MaxPool2d(kernel_size=erode_size, stride=1, padding=int((erode_size - 1)//2)) # erode kernel
    # pooling(y) + pooling(-y) == maxpool(y) - minpool(y): a morphological
    # gradient of the mask, i.e. a band around the mask edge that gets blurred.
    self.erode = lambda x, y:gaussian_blur_masked(x, pooling(y) + pooling(-y))
    self.device = device
    super().__init__(ChunkCameras, FrameData, **kwargs)
    self.t1 = time.time()  # debug timing (used by the commented print below)
    self.barrier = None  # NOTE(review): never used in this file — presumably set by callers; verify

  async def __call__(self, chunks: ChunkCameras) -> list[FrameData]:
    """Render the batch and return one FrameData per camera."""
    viewpoint_cams = chunks.cameras
    with torch.no_grad():
      # the model has a warm-up mechanism here: throughput ramps up gradually
      outputs = render_from_batch(viewpoint_cams, 
                                  self.gaussian, 
                                  False, 
                                  background="gt", 
                                  visualize_attention=False,
                                  feature_inputs=["aud", "eye", "cam", "uid"],
                                  )
    images: torch.Tensor = outputs["rendered_image_tensor"] # BCHW
    alphas: torch.Tensor = outputs["rend_alpha_tensor"] # BCHW
    bFrameData = []
    torch.cuda.synchronize() # rendering is next; flush the pending GPU queue first
    for idx, (timestamp, wave, camera) in enumerate(zip(chunks.times, chunks.waves, chunks.cameras)):
      image = images[idx]
      alpha = alphas[idx]
      image_gt = camera.original_image
      # composite render over GT via alpha, soften the seam, move to HWC numpy
      image_out = self.erode((image_gt * (1 - alpha) + image * alpha), alpha).permute(1, 2, 0).data.cpu().numpy()
      bFrameData.append(FrameData(timestamp, wave, image_out))
    passtime = time.time() - self.t1  # debug timing
    self.t1 = time.time()
    # print(f"module: {self.__class__.__name__}, time {passtime}, fps:{len(bFrameData)/passtime}")
    return bFrameData
  
if __name__ == '__main__': # Test Script
  class Args:
    """Hard-coded stand-in for the CLI argument namespace used by model.extract()."""
    def __init__(self):
      # --- model / data paths ---
      self.sh_degree = 3
      self.source_path = 'data/anchorwoman'
      self.model_path = 'model/anchorwomanFace'
      self.custom_wav = 'data/anchorwoman/aud.wav'
      self.images = 'images'
      self.resolution = -1
      self.background_type = "torso"
      self.data_device = 'cuda'
      self.eval = True
      self.render_process = False
      self.add_points = False
      self.extension = '.png'
      self.llffhold = 8
      self.dataloader = False
      self.zerostamp_init = False
      self.custom_sampler = None
      # --- optimization / training hyperparameters (unused in pure inference) ---
      self.iterations = -1
      self.coarse_iterations = 7999
      self.position_lr_init = 0.00016
      self.position_lr_final = 1.6e-06
      self.position_lr_delay_mult = 0.01
      self.position_lr_max_steps = 20000
      self.deformation_lr_init = 0.0001
      self.deformation_lr_final = 1e-05
      self.deformation_lr_delay_mult = 0.01
      self.grid_lr_init = 0.0016
      self.grid_lr_final = 0.00016
      self.feature_lr = 0.0025
      self.opacity_lr = 0.05
      self.scaling_lr = 0.005
      self.rotation_lr = 0.001
      self.percent_dense = 0.01
      self.lambda_dssim = 0
      self.lambda_lpips = 0
      self.weight_constraint_init = 1
      self.weight_constraint_after = 0.2
      self.weight_decay_iteration = 5000
      self.opacity_reset_interval = 3000
      self.densification_interval = 100
      self.densify_from_iter = 1000
      self.densify_until_iter = 7000
      self.densify_grad_threshold_coarse = 0.001
      self.densify_grad_threshold_fine_init = 0.0002
      self.densify_grad_threshold_after = 0.0002
      self.pruning_from_iter = 500
      self.pruning_interval = 100
      self.opacity_threshold_coarse = 0.005
      self.opacity_threshold_fine_init = 0.005
      self.opacity_threshold_fine_after = 0.005
      self.batch_size = 16
      self.batch_size_infer = 25
      self.add_point = False
      self.lip_fine_tuning = True
      self.depth_fine_tuning = True
      self.split_gs_in_fine_stage = False
      self.canonical_tri_plane_factor_list = ['opacity', 'shs']
      self.train_l = ['xyz', 'deformation', 'grid', 'f_dc', 'f_rest', 'opacity', 'scaling', 'rotation']
      self.convert_SHs_python = False
      self.compute_cov3D_python = False
      self.debug = True
      # --- deformation network architecture ---
      self.net_width = 128
      self.timebase_pe = 4
      self.defor_depth = 2
      self.posebase_pe = 10
      self.scale_rotation_pe = 2
      self.opacity_pe = 2
      self.timenet_width = 64
      self.timenet_output = 32
      self.bounds = 1.6
      self.plane_tv_weight = 0.0002
      self.time_smoothness_weight = 0.001
      self.l1_time_planes = 0.0001
      self.kplanes_config = {'grid_dimensions': 2, 'input_coordinate_dim': 3, 'output_coordinate_dim': 32, 'resolution': [64, 64, 64]}
      self.multires = [1, 2]
      self.no_dx = False
      self.no_grid = False
      self.no_ds = False
      self.no_dr = False
      self.no_do = False
      self.no_dshs = False
      self.empty_voxel = False
      self.grid_pe = 0
      self.static_mlp = False
      self.apply_rotation = False
      self.only_infer = True
      # --- transformer settings ---
      self.d_model = 64
      self.n_head = 2
      self.drop_prob = 0.2
      self.ffn_hidden = 128
      self.n_layer = 1
      self.visualize_attention = False
      self.train_tri_plane = True
      self.pos_emb = True
      # --- runtime / script options ---
      self.ip = '127.0.0.1'
      self.port = 6009
      self.debug_from = -1
      self.detect_anomaly = False
      self.test_iterations = list(range(0, 50000, 500))
      # NOTE(review): 30000 appears twice in this list — likely an unintended duplicate
      self.save_iterations = [1000, 3000, 4000, 5000, 6000, 7000, 9000, 10000, 12000, 14000, 20000, 30000, 45000, 60000, 30000]
      self.quiet = False
      self.checkpoint_iterations = []
      self.start_checkpoint = None
      self.expname = ''
      self.use_wandb = False
      self.configs = 'arguments/64_dim_1_transformer.py'
      self.iteration = 10000
      self.skip_train = False
      self.skip_test = False
      self.skip_video = False
      self.custom_aud = ''

  # Usage example: build the scene, then wire the stream stages together.
  from scene import Scene
  from argparse import ArgumentParser
  from arguments import ModelParams, PipelineParams, get_combined_args, ModelHiddenParams
  args = Args()
  parser = ArgumentParser(description="Testing script parameters")
  model = ModelParams(parser, sentinel=True)
  pipeline = PipelineParams(parser)
  hyperparam = ModelHiddenParams(parser)

  # extract() pulls each param group's fields out of the Args namespace
  dataset, hyperparam, iteration, pipe = model.extract(args), hyperparam.extract(args), args.iteration, pipeline.extract(args)
  skip_train, skip_test, skip_video = args.skip_train, args.skip_test, args.skip_video
  batch_size = 1

  data_dir = dataset.source_path
  gaussians = GaussianModel(dataset.sh_degree, hyperparam)
  scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
  gaussians.eval()
  viewpoint_stack = scene.getTestCameras()
  
  from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
  
  async def main():
    """Wire the stream stages into a pipeline and run it on a shared executor."""
    with ThreadPoolExecutor(max_workers=6) as executor:
      # TODO: set an explicit queue length for each module
      stream1 = WaveFileReader(args.custom_wav, executor=executor)
      stream2 = AudioBytes2Format(executor=executor)
      stream4 = Frame2Chunk(executor=executor)
      stream5 = Chunk2Avefeat(executor=executor)
      stream6 = Avefeat2ModelIn(viewpoint_stack, executor=executor) 
      stream7 = Model2Output(gaussians, executor=executor)
      stream8 = PasteOn("data/xxx/xxx.mp4", (277, 636, 508, 508), executor=executor)
      stream9 = BarrierPlayer(window_size=(320,640), executor=executor)
      # wave file -> format -> chunk -> AVE features -> cameras -> render -> paste -> play
      stream1.link_to(stream2).link_to(stream4).link_to(stream5).link_to(stream6).link_to(stream7).link_to(stream8).link_to(stream9)
      await stream1.async_start()
        
  asyncio.run(main(), debug=False)
  # main_sync()