"""
LipReal 版本
基于流传输的Wav2Lip数字人实现
"""

import asyncio
import logging
import time
from threading import Thread, Event
import torch.multiprocessing as mp
import cv2
import numpy as np
import queue

from .websocket_media import MediaStreamer
from .ttsreal import BaseTTS, IndexTTS, EdgeTTS
from ..wav2lip.models import Wav2Lip
from .lipreal import LipReal

logger = logging.getLogger(__name__)

def mirror_index(size, index):
    """Map a monotonically growing index onto a ping-pong cycle of length *size*.

    Even passes walk the cycle forward (0..size-1), odd passes walk it
    backward (size-1..0), so consecutive indices never jump at the seam.
    """
    pass_no, offset = divmod(index, size)
    return offset if pass_no % 2 == 0 else size - 1 - offset

class LipRealStream(LipReal):
    """Stream-transport LipReal implementation.

    Instead of pushing frames to WebRTC tracks, this variant sends rendered
    video/audio frames through an attached :class:`MediaStreamer` (WebSocket
    based).  When no streamer is attached it falls back to the legacy
    track-based ``process_frames`` path of the base class.
    """

    def __init__(self, opt, model, avatar):
        super().__init__(opt, model, avatar)
        # Attached lazily via set_media_streamer(); None selects the legacy path.
        self.media_streamer: MediaStreamer = None
        self.async_mode = False  # whether the asynchronous processing mode is enabled

    def set_media_streamer(self, streamer: MediaStreamer):
        """Attach the media streamer used to transport rendered frames."""
        self.media_streamer = streamer

    def set_async_mode(self, async_mode: bool):
        """Enable or disable asynchronous processing mode."""
        self.async_mode = async_mode

    def render(self, quit_event, loop=None, audio_track=None, video_track=None):
        """Main entry point for stream-based rendering.

        Starts TTS, the frame-processing thread (WebSocket or legacy) and the
        Wav2Lip inference thread, then drives the ASR stepping loop with simple
        backpressure until ``quit_event`` is set.
        """
        self.tts.render(quit_event)
        self.init_customindex()

        if self.media_streamer:
            process_thread = Thread(
                target=self.process_frames_sync,
                args=(quit_event, self.media_streamer),
                daemon=True,
                name=f"websocket-render-{self.sessionid}"
            )
        else:
            process_thread = Thread(
                target=self.process_frames,
                args=(quit_event, loop, audio_track, video_track, None),
                daemon=True,
                name=f"legacy-render-{self.sessionid}"
            )
        process_thread.start()

        # Start the Wav2Lip inference thread.
        from .lipreal import inference
        inference_thread = Thread(
            target=inference,
            args=(
                quit_event,
                self.batch_size,
                self.face_list_cycle,
                self.asr.feat_queue,
                self.asr.output_queue,
                self.res_frame_queue,
                self.model,
            ),
            daemon=True,
            name=f"websocket-inference-{self.sessionid}"
        )
        inference_thread.start()

        # Main loop: feed the ASR and apply simple backpressure / statistics.
        count = 0
        # FIX: the original used mp.current_process().pid as a timestamp, which
        # made the elapsed-time/fps computation meaningless. Use a real clock.
        _starttime = time.time()

        while not quit_event.is_set():
            self.asr.run_step()

            # Connection check and flow control.
            if self.media_streamer:
                if not self.media_streamer.is_connected:
                    time.sleep(0.1)
                    continue

                # Avoid unbounded memory growth in the result queue.
                if getattr(self.media_streamer, 'websocket', None):
                    try:
                        # Simple backpressure: pause while the queue is backed up.
                        if self.res_frame_queue.qsize() > 10:
                            time.sleep(0.05)
                            continue
                    except Exception:
                        # qsize() is not implemented on every platform
                        # (e.g. multiprocessing queues on macOS) — best effort.
                        pass

            # Periodic performance statistics.
            count += 1
            if count % 100 == 0:
                elapsed = time.time() - _starttime
                if elapsed > 0:
                    logger.debug("render loop fps: %.2f", count / elapsed)

            # Short sleep to avoid busy-waiting.
            time.sleep(0.01)


    async def process_frames_async(self, quit_event, media_streamer=None):
        """Asynchronous frame-processing loop.

        Pulls inference results from ``self.res_frame_queue`` and pushes
        composited video frames plus the matching audio frames through
        *media_streamer*.  While the queue is empty it keeps the stream alive
        by sending the first avatar frame as a silent placeholder at 25 FPS.
        """
        enable_transition = False  # set True to cross-fade between silent/speaking states

        if enable_transition:
            _last_speaking = False
            _transition_start = time.time()
            _transition_duration = 0.1  # cross-fade duration in seconds
            _last_silent_frame = None  # cached last silent frame
            _last_speaking_frame = None  # cached last speaking frame

        frame_count = 0
        skipped_frames = 0
        last_stats_time = time.time()
        processed_frames = 0

        while not quit_event.is_set():
            try:
                res_frame, idx, audio_frames = self.res_frame_queue.get(block=True, timeout=0.1)
                # Got data — process it below.
            except queue.Empty:
                # No inference output: send a silent placeholder frame instead.
                if media_streamer and not media_streamer.is_connected:
                    await asyncio.sleep(0.1)
                    continue

                # Use the first avatar frame as the default silent frame.
                if hasattr(self, 'frame_list_cycle') and len(self.frame_list_cycle) > 0:
                    target_frame = self.frame_list_cycle[0].copy()
                    if target_frame is not None:
                        # Watermark.
                        cv2.putText(target_frame, "any4dh", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (128,128,128), 1)

                        # Send the video frame.
                        if media_streamer:
                            try:
                                await media_streamer.send_video_frame(target_frame)
                            except Exception as e:
                                logger.error(f"Silent frame sending failed: {e}")
                    else:
                        logger.error("target_frame is None!")
                else:
                    logger.error(f"No frame_list_cycle available! hasattr: {hasattr(self, 'frame_list_cycle')}, len: {len(getattr(self, 'frame_list_cycle', []))}")

                # Pace the placeholder stream.
                await asyncio.sleep(1.0 / 25)  # 25 FPS
                continue

            # Connection check.
            if media_streamer and not media_streamer.is_connected:
                await asyncio.sleep(0.1)
                continue

            if enable_transition:
                # Detect silent<->speaking state changes to restart the fade.
                current_speaking = not (audio_frames[0][1] != 0 and audio_frames[1][1] != 0)
                if current_speaking != _last_speaking:
                    _transition_start = time.time()
                _last_speaking = current_speaking

            # Same compositing logic as the original process_frames.
            if res_frame is None:  # silent state — inference returned None
                self.speaking = False
                audiotype = audio_frames[0][1] if audio_frames else 0
                if self.custom_index.get(audiotype) is not None:  # custom idle video available
                    mirindex = mirror_index(len(self.custom_img_cycle[audiotype]), self.custom_index[audiotype])
                    target_frame = self.custom_img_cycle[audiotype][mirindex]
                    self.custom_index[audiotype] += 1
                else:
                    target_frame = self.frame_list_cycle[idx]

                if enable_transition:
                    # Speaking -> silent cross-fade.
                    if time.time() - _transition_start < _transition_duration and _last_speaking_frame is not None:
                        alpha = min(1.0, (time.time() - _transition_start) / _transition_duration)
                        combine_frame = cv2.addWeighted(_last_speaking_frame, 1-alpha, target_frame, alpha, 0)
                    else:
                        combine_frame = target_frame
                    # Cache the silent frame for the reverse fade.
                    _last_silent_frame = combine_frame.copy()
                else:
                    combine_frame = target_frame
            else:
                self.speaking = True
                try:
                    current_frame = self.paste_back_frame(res_frame, idx)
                except Exception as e:
                    logger.warning(f"paste_back_frame error: {e}")
                    continue
                if enable_transition:
                    # Silent -> speaking cross-fade.
                    if time.time() - _transition_start < _transition_duration and _last_silent_frame is not None:
                        alpha = min(1.0, (time.time() - _transition_start) / _transition_duration)
                        combine_frame = cv2.addWeighted(_last_silent_frame, 1-alpha, current_frame, alpha, 0)
                    else:
                        combine_frame = current_frame
                    # Cache the speaking frame for the reverse fade.
                    _last_speaking_frame = combine_frame.copy()
                else:
                    combine_frame = current_frame

            # Watermark.
            cv2.putText(combine_frame, "any4dh", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (128,128,128), 1)

            # Send the video frame — the streamer queues it, so this is thread-safe.
            if media_streamer:
                try:
                    await media_streamer.send_video_frame(combine_frame)
                except Exception as e:
                    logger.error(f"Video frame sending failed: {e}")
                    await asyncio.sleep(0.1)

            self.record_video_data(combine_frame)
            frame_count += 1

            # Forward the matching audio frames.
            for audio_frame in audio_frames:
                # NOTE: renamed from 'type' to avoid shadowing the builtin.
                frame, audio_type, eventpoint = audio_frame

                # Send the audio frame — queued by the streamer, thread-safe.
                if media_streamer:
                    try:
                        await media_streamer.send_audio_frame(frame, eventpoint)
                    except Exception as e:
                        logger.error(f"Audio frame sending failed: {e}")

                self.record_audio_data(frame)

            # Pace the output stream.
            await asyncio.sleep(1.0 / 25)  # 25 FPS

    def process_frames_sync(self, quit_event, media_streamer):
        """Synchronous wrapper running the async frame loop on a private event loop.

        Intended to be the target of a dedicated thread.
        """
        loop = None  # guard: new_event_loop() may fail before 'loop' is bound
        try:
            # Create a fresh event loop for this thread.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            # Run the asynchronous frame processing until quit.
            loop.run_until_complete(self.process_frames_async(quit_event, media_streamer))

        except Exception as e:
            logger.error(f"Error in process_frames_sync: {e}")
        finally:
            if loop is not None:
                loop.close()

class LipRealStreamOptimized(LipRealStream):
    """Optimized LipReal variant that sends avatar frames directly.

    Bypasses the inference pipeline and simply cycles through the avatar's
    base frames at ~25 FPS, sending each one synchronously through the
    attached media streamer.  Useful as a transport smoke test.
    """

    def __init__(self, opt, model, avatar):
        super().__init__(opt, model, avatar)

        # Flow-control tuning parameters.
        self.enable_frame_skip = True
        self.max_queue_size = 5
        self.frame_skip_threshold = 10
        self.last_frame_sent_time = 0

    def render(self, quit_event, loop=None, audio_track=None, video_track=None):
        """Drive a simple ~25 FPS loop cycling through avatar frames.

        Requires a media streamer to be attached; returns immediately otherwise.
        """
        try:
            self.tts.render(quit_event)
        except Exception as e:
            logger.error(f"TTS render failed, continuing without TTS: {e}")

        self.init_customindex()

        if self.media_streamer:
            logger.info(f"Starting direct frame sending for session {self.sessionid}")
        else:
            logger.warning(f"No media streamer available!")
            return

        count = 0

        while not quit_event.is_set():
            try:
                # Wait for a live connection before sending anything.
                if not self.media_streamer.is_connected:
                    time.sleep(0.1)
                    continue

                if hasattr(self, 'frame_list_cycle') and len(self.frame_list_cycle) > 0:
                    frame_idx = count % len(self.frame_list_cycle)
                    target_frame = self.frame_list_cycle[frame_idx].copy()

                    if target_frame is not None:
                        # Overlay the frame counter for visual debugging.
                        cv2.putText(target_frame, f"DH-{count}", (50, 50),
                                  cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

                        try:
                            result = self.send_frame_simple(target_frame)
                            if result:
                                if count % 120 == 0:  # log roughly every 5 seconds
                                    logger.info(f"Successfully sent frame #{count}, shape: {target_frame.shape}")
                            else:
                                if count % 60 == 0:  # throttle error logging
                                    logger.error(f"Failed to send frame #{count}")
                        except Exception as e:
                            if count % 60 == 0:  # throttle error logging
                                logger.error(f"Frame sending error: {e}")

                count += 1
                time.sleep(0.04)  # ~25 FPS

            except Exception as e:
                logger.error(f"Main loop error: {e}")
                time.sleep(0.1)

        return

    def send_frame_simple(self, frame):
        """Synchronously send one video frame through the media streamer.

        Returns True on success, False on any failure.  FIX: the original
        called ``run_until_complete`` on a loop obtained from
        ``asyncio.get_running_loop()`` — a loop that is by definition already
        running, which always raises ``RuntimeError`` — and a blocking
        ``future.result()`` against the current thread's own running loop
        would deadlock.  We now target the WebSocket's loop (when running)
        via ``run_coroutine_threadsafe``, else fall back to a temporary loop.
        """
        try:
            if not (self.media_streamer and self.media_streamer.is_connected):
                logger.warning("MediaStreamer not available or disconnected, cannot send frame")
                return False

            try:
                # Prefer the WebSocket's own event loop if one is attached.
                # NOTE(review): '_loop' is a private attribute of the websocket
                # implementation — confirm it stays available across upgrades.
                ws_loop = None
                if getattr(self.media_streamer, 'websocket', None) is not None:
                    ws_loop = getattr(self.media_streamer.websocket, '_loop', None)
                logger.debug(f"WebSocket loop: {id(ws_loop) if ws_loop else 'None'}")

                if ws_loop is not None and ws_loop.is_running():
                    # Cross-thread submission to the streamer's running loop.
                    future = asyncio.run_coroutine_threadsafe(
                        self.media_streamer.send_video_frame(frame),
                        ws_loop
                    )
                    return future.result(timeout=1.0)

                # No usable running loop: run the coroutine on a temporary loop.
                temp_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(temp_loop)
                try:
                    return temp_loop.run_until_complete(
                        self.media_streamer.send_video_frame(frame)
                    )
                finally:
                    temp_loop.close()

            except Exception as loop_error:
                logger.error(f"Event loop handling error: {loop_error}")
                return False

        except Exception as e:
            logger.error(f"Simple frame send error: {e}")
            return False