"""媒体处理器 - OpenCV摄像头"""
import cv2
import pyaudio
import base64
import threading
import time
import os
import glob
import json
import queue
import subprocess
import sys
from datetime import datetime
from .config import MEDIA_CONFIG, setup_logging
from .config import GPIO_CONFIG
from .model_manager import ModelManager

class MediaProcessor:
    """统一的媒体处理器"""
    
    def __init__(self, connection_manager):
        """Wire the processor to an existing connection manager.

        Only in-memory state is initialized here; no camera/audio/GPIO
        hardware is touched until the explicit init_*/start_* methods run.

        Args:
            connection_manager: project ConnectionManager used for the main
                control/video channel (audio opens its own connection later).
        """
        self.conn = connection_manager
        self.logger = setup_logging('Media')

        # Camera (OpenCV VideoCapture); lock guards capture vs. cleanup races.
        self.cap = None
        self.camera_lock = threading.Lock()

        # Audio (PyAudio) instance and input stream.
        self.audio = None
        self.audio_stream = None
        self.audio_lock = threading.Lock()

        # Periodic snapshot capture control.
        self.auto_capture_enabled = False
        self.auto_capture_thread = None
        self.capture_interval = MEDIA_CONFIG.get('capture_interval', 3.0)

        # Video byte-stream state.
        self.streaming = False
        # Legacy field kept for compatibility; no longer used.
        self.video_stream_thread = None
        # Two-thread pipeline: capture thread + sender thread.
        self.video_capture_thread = None
        self.video_sender_thread = None
        # Single-slot queue: always holds only the newest encoded frame.
        self.video_frame_queue = queue.Queue(maxsize=1)
        # Raw-frame queue (BGR/RGB matrices) consumed by the inference thread.
        self.video_raw_frame_queue = queue.Queue(maxsize=1)
        # Inference thread handle.
        self.video_infer_thread = None
        # Handshake flags (re-handshake automatically after a reconnect).
        self._handshake_done = False
        self._audio_handshake_done = False
        # Lazily created model manager.
        self.models = None
        # Local voice-alert throttling.
        self._last_alert_ts = 0.0
        self._alert_cooldown = 2.0  # seconds

        # Ultrasonic ranging state.
        self._ultra_thread = None
        self._ultra_running = False
        self._ultra_latest_m = None
        self._ultra_prev_m = None
        self._ultra_alert_ts = 0.0
        self._ultra_gpiod = None

        def _cfg_float(key, default):
            # Defensive parse: config values may be strings or missing.
            # Replaces two copy-pasted try/except blocks from the original.
            try:
                return float(MEDIA_CONFIG.get(key, default))
            except Exception:
                return default

        self._ultra_jump_threshold = _cfg_float('ultrasonic_jump_threshold_m', 0.3)
        self._ultra_alert_cooldown = _cfg_float('ultrasonic_alert_cooldown_s', 3.0)

        # Audio byte-stream state (uses its own connection, not self.conn).
        self.audio_streaming = False
        self.audio_capture_thread = None
        self.audio_sender_thread = None
        self.audio_frame_queue = queue.Queue(maxsize=1)
        self.audio_conn = None

    def init_camera(self):
        """Probe camera devices and open the first one that yields frames.

        Tries the configured camera id first, then numeric ids 0-7, then (on
        Linux) every /dev/video* node. For each device several resolutions
        are attempted and a few test reads must succeed before the camera is
        accepted into self.cap.

        Returns:
            bool: True if a working camera was opened.
        """
        try:
            self._cleanup_camera_resources()
            primary = MEDIA_CONFIG.get('camera_id', 0)
            nums = list(range(8))
            # Configured id first, then the remaining numeric ids.
            candidates = [primary] + [i for i in nums if not isinstance(primary, int) or i != primary]
            if sys.platform.startswith('linux'):
                try:
                    candidates.extend(sorted(glob.glob('/dev/video*')))
                except Exception:
                    pass
            w_default = int(MEDIA_CONFIG.get('camera_width', 640))
            h_default = int(MEDIA_CONFIG.get('camera_height', 480))
            fps = int(MEDIA_CONFIG.get('camera_fps', 30))
            resolutions = [(w_default, h_default), (1280, 720), (1920, 1080)]
            for cid in candidates:
                try:
                    self.cap = self._open_capture(cid)
                    if not self.cap or not self.cap.isOpened():
                        self.cap = None
                        continue
                    for (w, h) in resolutions:
                        try:
                            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
                            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
                            self.cap.set(cv2.CAP_PROP_FPS, fps)
                            self._set_preferred_fourcc()
                            time.sleep(0.2)  # let the driver settle before test reads
                            frame = self._test_read()
                            if frame is not None:
                                self.logger.info(f"摄像头初始化成功: id={cid}, {frame.shape[1]}x{frame.shape[0]}@{fps}")
                                return True
                        except Exception:
                            continue
                    try:
                        self.cap.release()
                    except Exception:
                        pass
                    self.cap = None
                except Exception:
                    try:
                        if self.cap:
                            self.cap.release()
                    except Exception:
                        pass
                    self.cap = None
                    continue
            self.logger.error("摄像头设备不可用")
            return False
        except Exception as e:
            self.logger.error(f"摄像头初始化异常: {e}")
            return False

    def _open_capture(self, cid):
        """Open a VideoCapture for cid, preferring V4L2 with a real fallback.

        Bug fix: cv2.VideoCapture does not raise on failure — it returns an
        unopened object — so the original try/except fallback to the default
        backend was dead code. isOpened() must be checked to decide whether
        to retry without the V4L2 backend hint.
        """
        try:
            cap = cv2.VideoCapture(cid, cv2.CAP_V4L2)
        except Exception:
            cap = None
        if cap is not None and cap.isOpened():
            return cap
        if cap is not None:
            try:
                cap.release()
            except Exception:
                pass
        # Fall back to the default backend (non-V4L2 platforms, odd drivers).
        try:
            return cv2.VideoCapture(cid)
        except Exception:
            return None

    def _set_preferred_fourcc(self):
        """Best-effort pixel-format hint on self.cap: MJPG, then YUYV."""
        try:
            self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        except Exception:
            try:
                self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUYV'))
            except Exception:
                pass

    def _test_read(self):
        """Attempt up to five reads on self.cap; return the first good frame or None."""
        for _ in range(5):
            ok, frame = self.cap.read()
            if ok and frame is not None:
                return frame
            time.sleep(0.05)
        return None
    
    def _cleanup_camera_resources(self):
        """Release the OpenCV capture handle (if any) and reset self.cap."""
        if not self.cap:
            return
        try:
            self.cap.release()
        except Exception:
            pass
        self.cap = None
    
    def _init_picamera2(self):
        """Initialize the camera via picamera2 (Raspberry Pi camera stack).

        Bug fix: the original referenced the name ``Picamera2`` without any
        import in this module (a guaranteed NameError when called) and read
        ``self.picamera2``, which __init__ never defines. The import is now
        lazy and guarded, and the attribute is read defensively.

        Returns:
            bool: True if a test frame was captured successfully.
        """
        try:
            # Lazy, optional dependency: only present on Pi-style hardware.
            try:
                from picamera2 import Picamera2
            except ImportError as ie:
                self.logger.warning(f"picamera2初始化失败: {ie}")
                return False

            # Tear down any previous instance before creating a new one.
            prev = getattr(self, 'picamera2', None)
            if prev:
                try:
                    prev.stop()
                    prev.close()
                except Exception:
                    pass
            self.picamera2 = None

            self.picamera2 = Picamera2()

            # Configure capture size from the shared media config.
            width = MEDIA_CONFIG.get('camera_width', 1280)
            height = MEDIA_CONFIG.get('camera_height', 720)
            camera_config = self.picamera2.create_preview_configuration(
                main={"size": (width, height), "format": "RGB888"}
            )
            self.picamera2.configure(camera_config)
            self.picamera2.start()

            # Give the sensor time to stabilize before the test capture.
            time.sleep(2)

            # Accept the camera only if a non-empty frame comes back.
            test_frame = self.picamera2.capture_array()
            if test_frame is not None and test_frame.size > 0:
                self.logger.info(f"picamera2摄像头测试成功，分辨率: {test_frame.shape}")
                return True
            self.logger.warning("picamera2测试捕获失败")
            self._cleanup_picamera2()
            return False

        except Exception as e:
            self.logger.warning(f"picamera2初始化失败: {e}")
            self._cleanup_picamera2()
            return False
    
    def _cleanup_picamera2(self):
        """Stop and close the picamera2 instance, if one exists.

        Bug fix: this was a bare ``return None`` stub even though
        _init_picamera2 calls it on failure paths to release the camera,
        leaking the device. It now actually stops/closes the handle.

        Returns:
            None (kept for compatibility with the original signature).
        """
        cam = getattr(self, 'picamera2', None)
        if cam:
            try:
                cam.stop()
            except Exception:
                pass
            try:
                cam.close()
            except Exception:
                pass
        self.picamera2 = None
        return None

    def _init_ultrasonic(self):
        """Start ultrasonic ranging on Orange Pi hardware.

        Two strategies, tried in order:
          1. libgpiod with *named* GPIO lines (resolved via the `gpiofind`
             CLI, or by scanning /dev/gpiochip* line names), when pin names
             are configured.
          2. OPi.GPIO with BOARD-numbered pins as a fallback.

        On success a daemon thread runs the measurement loop and True is
        returned; any failure returns False.
        """
        try:
            if not GPIO_CONFIG.get('is_orangepi', False):
                return False

            trig_name = str(GPIO_CONFIG.get('ultrasonic_trig_pin_name', '')).strip()
            echo_name = str(GPIO_CONFIG.get('ultrasonic_echo_pin_name', '')).strip()
            if trig_name and echo_name:
                try:
                    import gpiod
                    def find(name):
                        # Resolve a GPIO line name to (chip_path, offset).
                        # First try the `gpiofind` command-line tool...
                        try:
                            out = subprocess.run(['gpiofind', name], capture_output=True, text=True, timeout=2)
                            if out.returncode == 0:
                                parts = out.stdout.strip().split()
                                if len(parts) == 2:
                                    return parts[0], int(parts[1])
                        except Exception:
                            pass
                        # ...then fall back to scanning every chip's lines.
                        try:
                            candidates = [name, name.upper()]
                            for chip_idx in range(0, 8):
                                chip_path = f"/dev/gpiochip{chip_idx}"
                                if not os.path.exists(chip_path):
                                    continue
                                chip = gpiod.Chip(chip_path)
                                for offset in range(0, 256):
                                    try:
                                        line = chip.get_line(offset)
                                        ln = (line.name or '').strip()
                                        if ln and ln in candidates:
                                            chip.close()
                                            return chip_path, offset
                                    except Exception:
                                        # Past the last valid offset on this chip.
                                        break
                                chip.close()
                        except Exception:
                            pass
                        return None
                    t = find(trig_name)
                    e = find(echo_name)
                    if t and e:
                        t_chip_path, t_offset = t
                        e_chip_path, e_offset = e
                        t_chip = gpiod.Chip(t_chip_path)
                        e_chip = gpiod.Chip(e_chip_path)
                        t_line = t_chip.get_line(t_offset)
                        e_line = e_chip.get_line(e_offset)
                        t_line.request(consumer='ultra_trig', type=gpiod.LINE_REQ_DIR_OUT)
                        e_line.request(consumer='ultra_echo', type=gpiod.LINE_REQ_DIR_IN)
                        try:
                            # Idle the trigger line low before the first pulse.
                            t_line.set_value(0)
                        except Exception:
                            pass
                        self._ultra_running = True
                        # Keep chip/line handles alive for later cleanup.
                        self._ultra_gpiod = {
                            't_chip': t_chip, 'e_chip': e_chip,
                            't_line': t_line, 'e_line': e_line
                        }
                        self._ultra_thread = threading.Thread(target=self._ultra_loop_gpiod, args=(t_line, e_line), name="UltrasonicLoop", daemon=True)
                        self._ultra_thread.start()
                        self.logger.info(f"超声波测距启动(gpiod): trig={trig_name}({t_chip_path}:{t_offset}), echo={echo_name}({e_chip_path}:{e_offset})")
                        return True
                    else:
                        self.logger.warning("未找到匹配的GPIO行名，回退到BOARD引脚配置")
                except Exception as ge:
                    self.logger.warning(f"libgpiod初始化失败，回退到BOARD引脚: {ge}")

            # Fallback path: OPi.GPIO with physical BOARD pin numbers.
            try:
                import OPi.GPIO as GPIO
            except Exception:
                return False
            trig = int(GPIO_CONFIG.get('ultrasonic_trig_pin', 10))
            echo = int(GPIO_CONFIG.get('ultrasonic_echo_pin', 11))
            GPIO.setmode(GPIO.BOARD)
            try:
                GPIO.setwarnings(False)
            except Exception:
                pass
            # Release the pins in case a previous run left them claimed.
            try:
                GPIO.cleanup(trig)
            except Exception:
                pass
            try:
                GPIO.cleanup(echo)
            except Exception:
                pass
            GPIO.setup(trig, GPIO.OUT)
            GPIO.setup(echo, GPIO.IN)
            GPIO.output(trig, GPIO.LOW)
            self._ultra_running = True
            self._ultra_thread = threading.Thread(target=self._ultra_loop, args=(GPIO, trig, echo), name="UltrasonicLoop", daemon=True)
            self._ultra_thread.start()
            self.logger.info(f"超声波测距启动: trig={trig}, echo={echo}")
            return True
        except Exception as e:
            self.logger.warning(f"超声波初始化失败: {e}")
            return False

    def _ultra_loop(self, GPIO, trig, echo):
        """Measurement loop for the OPi.GPIO backend.

        Repeatedly triggers the sensor (HC-SR04-style protocol — TODO confirm
        sensor model against hardware docs), converts the echo pulse width to
        meters, publishes a median over the last few samples into
        self._ultra_latest_m, and speaks a throttled alert when the distance
        jumps by more than the configured threshold.
        """
        interval = 0.2      # seconds between measurements
        samples = []        # rolling window for the median filter
        max_samples = 5
        while self._ultra_running:
            try:
                # ~10 µs trigger pulse starts one measurement.
                GPIO.output(trig, GPIO.HIGH)
                time.sleep(0.00001)
                GPIO.output(trig, GPIO.LOW)

                # Busy-wait for the echo rising edge, bounded by a timeout.
                t0 = time.perf_counter()
                timeout = 0.03
                while GPIO.input(echo) == GPIO.LOW:
                    if time.perf_counter() - t0 > timeout:
                        raise TimeoutError("echo HIGH timeout")
                start = time.perf_counter()
                # Measure how long the echo line stays high (pulse width).
                while GPIO.input(echo) == GPIO.HIGH:
                    if time.perf_counter() - start > timeout:
                        break
                end = time.perf_counter()

                # Distance = pulse_time * speed_of_sound / 2 (round trip).
                pulse = end - start
                dist_m = (pulse * 343.0) / 2.0
                if 0.05 <= dist_m <= 4.0:  # discard readings outside plausible range
                    samples.append(dist_m)
                    if len(samples) > max_samples:
                        samples.pop(0)
                    med = sorted(samples)[len(samples)//2]
                    self._ultra_latest_m = med
                    try:
                        if self._ultra_prev_m is not None:
                            delta = self._ultra_prev_m - med
                            # Sudden change: closer => obstacle, farther => pit.
                            # Voice alert is rate-limited by a cooldown.
                            if abs(delta) >= self._ultra_jump_threshold:
                                now = time.time()
                                if now - self._ultra_alert_ts >= self._ultra_alert_cooldown:
                                    msg = "前方可能有障碍物" if delta > 0 else "前方可能有坑"
                                    threading.Thread(target=self._speak_text_espeak, args=(msg,), daemon=True).start()
                                    self._ultra_alert_ts = now
                        self._ultra_prev_m = med
                    except Exception:
                        pass
                time.sleep(interval)
            except Exception:
                # Timeouts / IO hiccups: skip this cycle and keep sampling.
                time.sleep(interval)

    def _ultra_loop_gpiod(self, t_line, e_line):
        """Measurement loop for the libgpiod backend (same math as _ultra_loop).

        Args:
            t_line: requested gpiod output line driving the trigger pin.
            e_line: requested gpiod input line reading the echo pin.
        """
        interval = 0.2      # seconds between measurements
        samples = []        # rolling window for the median filter
        max_samples = 5
        while self._ultra_running:
            try:
                # ~10 µs trigger pulse; failures are tolerated (best effort).
                try:
                    t_line.set_value(1)
                    time.sleep(0.00001)
                    t_line.set_value(0)
                except Exception:
                    pass
                t0 = time.perf_counter()
                timeout = 0.03
                # Wait for the echo rising edge; read errors count as "low".
                while True:
                    v = 0
                    try:
                        v = int(e_line.get_value())
                    except Exception:
                        v = 0
                    if v == 1:
                        break
                    if time.perf_counter() - t0 > timeout:
                        raise TimeoutError("echo HIGH timeout")
                start = time.perf_counter()
                # Wait for the falling edge; read errors count as "high".
                while True:
                    v = 1
                    try:
                        v = int(e_line.get_value())
                    except Exception:
                        v = 1
                    if v == 0:
                        break
                    if time.perf_counter() - start > timeout:
                        break
                end = time.perf_counter()
                # Pulse width -> distance (speed of sound, round trip).
                pulse = end - start
                dist_m = (pulse * 343.0) / 2.0
                if 0.05 <= dist_m <= 4.0:  # discard implausible readings
                    samples.append(dist_m)
                    if len(samples) > max_samples:
                        samples.pop(0)
                    med = sorted(samples)[len(samples)//2]
                    self._ultra_latest_m = med
                    try:
                        if self._ultra_prev_m is not None:
                            delta = self._ultra_prev_m - med
                            if abs(delta) >= self._ultra_jump_threshold:
                                now = time.time()
                                if now - self._ultra_alert_ts >= self._ultra_alert_cooldown:
                                    # Closer => obstacle; farther => possible pit.
                                    msg = "前方可能有障碍物" if delta > 0 else "前方可能有坑"
                                    threading.Thread(target=self._speak_text_espeak, args=(msg,), daemon=True).start()
                                    self._ultra_alert_ts = now
                        self._ultra_prev_m = med
                    except Exception:
                        pass
                time.sleep(interval)
            except Exception:
                time.sleep(interval)

    def start_ultrasonic(self):
        """Public entry point: start ultrasonic ranging; False on any failure."""
        try:
            started = self._init_ultrasonic()
        except Exception:
            return False
        return bool(started)

    def get_ultrasonic_distance(self):
        """Return the most recent median ultrasonic distance in meters, or None."""
        return getattr(self, '_ultra_latest_m', None)

    def _cleanup_ultrasonic(self):
        """Stop the ranging thread and release GPIO resources (best effort).

        Safe to call multiple times; every step is individually guarded so a
        failure in one backend's teardown never blocks the other's.
        """
        try:
            if self._ultra_running:
                self._ultra_running = False
                if self._ultra_thread and self._ultra_thread.is_alive():
                    self._ultra_thread.join(timeout=2)
            try:
                if GPIO_CONFIG.get('is_orangepi', False):
                    # OPi.GPIO backend: global cleanup is sufficient.
                    try:
                        import OPi.GPIO as GPIO
                        GPIO.cleanup()
                    except Exception:
                        pass
                    # libgpiod backend: release lines before closing chips.
                    try:
                        if hasattr(self, '_ultra_gpiod') and isinstance(self._ultra_gpiod, dict):
                            gl = self._ultra_gpiod
                            try:
                                if gl.get('t_line'):
                                    gl['t_line'].release()
                            except Exception:
                                pass
                            try:
                                if gl.get('e_line'):
                                    gl['e_line'].release()
                            except Exception:
                                pass
                            try:
                                if gl.get('t_chip'):
                                    gl['t_chip'].close()
                            except Exception:
                                pass
                            try:
                                if gl.get('e_chip'):
                                    gl['e_chip'].close()
                            except Exception:
                                pass
                            self._ultra_gpiod = None
                    except Exception:
                        pass
            except Exception:
                pass
        except Exception as e:
            self.logger.debug(f"清理超声波模块失败: {e}")
    

    def start_auto_capture(self):
        """Begin periodic snapshot capture on a daemon thread.

        Returns:
            bool: False when no camera is open; True when already running or
            after the worker thread has been started.
        """
        if not self._is_camera_available():
            self.logger.error("摄像头不可用")
            return False
        if self.auto_capture_enabled:
            self.logger.info("自动拍照功能已在运行")
            return True
        self.auto_capture_enabled = True
        worker = threading.Thread(target=self._auto_capture_loop, daemon=True)
        self.auto_capture_thread = worker
        worker.start()
        self.logger.info("自动拍照功能已启动")
        return True
    
    def _is_camera_available(self):
        """True when a capture handle exists and reports itself open."""
        cap = self.cap
        if not cap:
            return False
        return bool(cap.isOpened())
    
    def stop_auto_capture(self):
        """Signal the snapshot worker to stop and wait briefly for it to exit."""
        self.auto_capture_enabled = False
        worker = self.auto_capture_thread
        if worker and worker.is_alive():
            worker.join(timeout=5)
        self.logger.info("自动拍照功能已停止")
    
    def _auto_capture_loop(self):
        """Worker loop: periodically capture a frame and save it under ./data.

        Runs until auto_capture_enabled is cleared; each iteration captures,
        writes a timestamped JPEG, and prunes snapshots over the limit.
        """
        while self.auto_capture_enabled:
            try:
                # Grab a fresh frame.
                ret, frame = self.capture_image()
                if ret and frame is not None:
                    # Save into the data folder with a timestamped name.
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    filename = f"camera_capture_{timestamp}.jpg"
                    filepath = os.path.join("data", filename)
                    
                    # Make sure the data directory exists.
                    os.makedirs("data", exist_ok=True)
                    
                    # Persist the image.
                    success = cv2.imwrite(filepath, frame)
                    if success:
                        self.logger.debug(f"自动拍照保存: {filepath}")
                        # Prune snapshots beyond the configured limit.
                        self._cleanup_old_captures()
                    else:
                        self.logger.warning(f"图片保存失败: {filepath}")
                else:
                    self.logger.warning("自动拍照失败")
                    
                # Sleep until the next scheduled shot.
                time.sleep(self.capture_interval)
                
            except Exception as e:
                self.logger.error(f"自动拍照异常: {e}")
                time.sleep(5)  # back off after an unexpected failure
    
    def _cleanup_old_captures(self):
        """Trim saved snapshots in ./data down to the configured maximum.

        Keeps the newest max_capture_files images (by mtime) and deletes the
        rest; every failure is logged and swallowed.
        """
        try:
            limit = MEDIA_CONFIG.get('max_capture_files', 10)
            snapshots = glob.glob(os.path.join("data", "camera_capture_*.jpg"))
            if len(snapshots) <= limit:
                return
            # Newest first, so everything past `limit` is stale.
            snapshots.sort(key=os.path.getmtime, reverse=True)
            for file_path in snapshots[limit:]:
                try:
                    os.remove(file_path)
                    self.logger.debug(f"删除旧照片: {os.path.basename(file_path)}")
                except Exception as e:
                    self.logger.warning(f"删除旧照片失败 {file_path}: {e}")
        except Exception as e:
            self.logger.warning(f"清理旧照片异常: {e}")
    
    def init_audio(self):
        """Open a PyAudio capture stream on the first input-capable device.

        Returns:
            bool: True when the input stream is open; False on any failure.
        """
        try:
            self.audio = pyaudio.PyAudio()

            # Pick the first device advertising at least one input channel.
            input_device = next(
                (i for i in range(self.audio.get_device_count())
                 if self.audio.get_device_info_by_index(i)['maxInputChannels'] > 0),
                None,
            )
            if input_device is None:
                return False

            self.audio_stream = self.audio.open(
                format=pyaudio.paInt16,
                channels=MEDIA_CONFIG['audio_channels'],
                rate=MEDIA_CONFIG['audio_rate'],
                input=True,
                input_device_index=input_device,
                frames_per_buffer=MEDIA_CONFIG['audio_chunk'],
            )
            return True
        except Exception as e:
            self.logger.error(f"音频初始化失败: {e}")
            return False
    
    def capture_image(self):
        with self.camera_lock:
            try:
                if self.cap and self.cap.isOpened():
                    ret, frame = self.cap.read()
                    if ret and frame is not None:
                        return True, frame
                    else:
                        return False, None
                else:
                    return False, None
            
            except Exception as e:
                self.logger.error(f"图像捕获异常: {e}")
                return False, None
    
    def cleanup(self):
        """Release every media resource: worker threads, streams, camera, audio.

        Order matters: stop producers (auto-capture, video/audio streams)
        before releasing the devices they read from.
        """
        try:
            # Stop periodic snapshots first so nothing touches the camera.
            self.stop_auto_capture()
            
            # Stop the video byte stream.
            self.stop_video_stream()
            # Stop the audio byte stream.
            self.stop_audio_stream()
            
            # Release the camera handle.
            self._cleanup_camera_resources()
            
            # Close the PyAudio stream and terminate the PyAudio instance.
            if hasattr(self, 'audio') and self.audio:
                try:
                    if self.audio_stream:
                        self.audio_stream.stop_stream()
                        self.audio_stream.close()
                    self.audio.terminate()
                except Exception as e:
                    self.logger.warning(f"音频资源清理失败: {e}")
                finally:
                    self.audio = None
                    self.audio_stream = None
            
            self.logger.info("媒体资源清理完成")
            
        except Exception as e:
            self.logger.error(f"媒体资源清理异常: {e}")
    
    def save_image(self, frame, prefix="camera_image"):
        """Write a frame to the configured media directory as a JPEG.

        Args:
            frame: image array to persist (as accepted by cv2.imwrite).
            prefix: filename prefix; a timestamp and .jpg are appended.

        Returns:
            str | None: the written file path, or None on failure.
        """
        try:
            # Path construction is inside the try so a bad config cannot
            # escape as an unhandled exception (it was outside before).
            timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
            filename = f"{prefix}_{timestamp}.jpg"
            filepath = os.path.join(MEDIA_CONFIG['media_dir'], filename)
            # Bug fix: cv2.imwrite signals failure via its return value, not
            # an exception, so the original could return a path to a file
            # that was never written.
            if not cv2.imwrite(filepath, frame):
                self.logger.error(f"保存图像失败: {filepath}")
                return None
            return filepath
        except Exception as e:
            self.logger.error(f"保存图像失败: {e}")
            return None
    
    def encode_image(self, frame):
        """JPEG-encode a frame and return it as a base64 string, or None."""
        try:
            encoded = cv2.imencode('.jpg', frame)
            return base64.b64encode(encoded[1]).decode('utf-8')
        except Exception as e:
            self.logger.error(f"图像编码失败: {e}")
            return None
    
    def capture_audio(self):
        """捕获音频数据"""
        if not self.audio_stream:
            if not self.init_audio():
                return None
        
        try:
            with self.audio_lock:
                data = self.audio_stream.read(MEDIA_CONFIG['audio_chunk'])
                return base64.b64encode(data).decode('utf-8')
        except Exception as e:
            self.logger.error(f"音频捕获失败: {e}")
            return None
    
    def get_capture_stats(self):
        """Summarize the auto-capture snapshots currently stored in ./data.

        Returns:
            dict | None: file count/size statistics, or None on error.
        """
        try:
            files = glob.glob(os.path.join("data", "camera_capture_*.jpg"))
            if not files:
                return {
                    'total_files': 0,
                    'total_size': 0,
                    'oldest_file': None,
                    'newest_file': None
                }
            files.sort(key=os.path.getmtime)  # oldest first
            size_total = sum(os.path.getsize(f) for f in files if os.path.exists(f))
            return {
                'total_files': len(files),
                'total_size': size_total,
                'total_size_mb': round(size_total / (1024 * 1024), 2),
                'oldest_file': os.path.basename(files[0]),
                'newest_file': os.path.basename(files[-1]),
                'max_files_limit': MEDIA_CONFIG.get('max_capture_files', 10)
            }
        except Exception as e:
            self.logger.error(f"获取捕获统计信息失败: {e}")
            return None

    def _load_audio_message_template(self):
        """Load the examples/音频消息.json message template, or None on failure."""
        try:
            project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            template_path = os.path.join(project_root, 'examples', '音频消息.json')
            with open(template_path, 'r', encoding='utf-8') as fh:
                return json.load(fh)
        except Exception as e:
            self.logger.warning(f"加载音频消息模板失败: {e}")
            return None

    def _send_audio_handshake(self, timeout=10):
        """Send the audio-stream handshake and wait for the server's ack.

        Args:
            timeout: seconds to wait for the server's JSON confirmation.

        Returns:
            bool: True when the server acknowledged with status 'success'.
        """
        msg = self._load_audio_message_template() or {}
        msg['type'] = 'audio'
        msg['role'] = 'hardware'
        # Advertise capture parameters so the server can decode the raw
        # PCM16 byte stream that follows.
        msg['audio'] = {
            'rate': int(MEDIA_CONFIG.get('audio_rate', 16000)),
            'channels': int(MEDIA_CONFIG.get('audio_channels', 1)),
            'format': 'pcm16',
            'chunk': int(MEDIA_CONFIG.get('audio_chunk', 1024)),
            'interval': float(MEDIA_CONFIG.get('audio_interval', 0.05)),
            'best_effort': True
        }
        if not (self.audio_conn and self.audio_conn.send_json(msg)):
            self.logger.error("音频握手消息发送失败")
            return False
        self.logger.info("等待服务端音频握手确认...")
        resp = self.audio_conn.receive_json(timeout=timeout) if self.audio_conn else None
        if not isinstance(resp, dict):
            self.logger.error("服务端未返回有效JSON或超时，取消音频传输")
            return False
        # Simplified: the original checked `A or (type=='confirmation' and A)`,
        # which reduces to just A (status == 'success').
        if resp.get('status') == 'success':
            self.logger.info("音频握手成功，开始传输音频字节流")
            return True
        self.logger.error(f"音频握手失败，服务端响应: {resp}")
        return False

    def start_audio_stream(self):
        """Open a dedicated connection and start streaming microphone audio.

        Flow: init the audio device -> connect + authenticate a fresh
        ConnectionManager -> JSON handshake -> start the capture/sender
        worker threads.

        Returns:
            bool: True when both worker threads are running.
        """
        if self.audio_streaming:
            self.logger.info("音频字节流已在运行")
            return True
        if not self.init_audio():
            self.logger.error("音频设备不可用，无法启动音频传输")
            return False
        try:
            # Audio uses its own connection, separate from the video channel.
            from .connection import ConnectionManager
            self.audio_conn = ConnectionManager()
            if not self.audio_conn.connect():
                self.logger.error("音频连接建立失败")
                self.audio_conn = None
                return False
            if not self.audio_conn.authenticate():
                self.logger.error("音频连接身份认证失败")
                try:
                    self.audio_conn.close()
                except Exception:
                    pass
                self.audio_conn = None
                return False
        except Exception as e:
            self.logger.error(f"音频连接初始化异常: {e}")
            self.audio_conn = None
            return False
        if not self._send_audio_handshake(timeout=5):
            return False
        self._audio_handshake_done = True
        self.audio_streaming = True
        # Producer/consumer pair: capture fills the 1-slot queue, sender drains it.
        self.audio_capture_thread = threading.Thread(target=self._audio_capture_loop, name="AudioCaptureLoop", daemon=True)
        self.audio_sender_thread = threading.Thread(target=self._audio_sender_loop, name="AudioSenderLoop", daemon=True)
        self.audio_capture_thread.start()
        self.audio_sender_thread.start()
        self.logger.info("音频字节流传输已启动")
        return True

    def stop_audio_stream(self):
        self.audio_streaming = False
        self._audio_handshake_done = False
        try:
            if self.audio_sender_thread and self.audio_sender_thread.is_alive():
                self.audio_sender_thread.join(timeout=5)
        except Exception:
            pass
        try:
            if self.audio_capture_thread and self.audio_capture_thread.is_alive():
                self.audio_capture_thread.join(timeout=5)
        except Exception:
            pass
        self.audio_sender_thread = None
        self.audio_capture_thread = None
        try:
            while not self.audio_frame_queue.empty():
                self.audio_frame_queue.get_nowait()
        except Exception:
            pass
        try:
            if self.audio_conn:
                self.audio_conn.close()
        except Exception:
            pass
        self.audio_conn = None
        self.logger.info("音频字节流传输已停止")

    def _audio_capture_loop(self):
        """Producer loop: read PCM chunks, keeping only the newest in the queue.

        Runs until audio_streaming is cleared. The queue holds exactly one
        frame; on overflow the stale frame is dropped so the sender always
        sees the freshest audio.
        """
        interval = float(MEDIA_CONFIG.get('audio_interval', 0.05))
        chunk = int(MEDIA_CONFIG.get('audio_chunk', 1024))
        while self.audio_streaming:
            try:
                if not self.audio_stream:
                    time.sleep(interval)
                    continue
                with self.audio_lock:
                    data = self.audio_stream.read(chunk, exception_on_overflow=False)
                try:
                    self.audio_frame_queue.put_nowait(data)
                except queue.Full:
                    # Drop the stale frame, then retry with the fresh one.
                    try:
                        _ = self.audio_frame_queue.get_nowait()
                    except Exception:
                        pass
                    try:
                        self.audio_frame_queue.put_nowait(data)
                    except Exception:
                        pass
                time.sleep(interval)
            except Exception as e:
                self.logger.error(f"音频采集异常: {e}")
                time.sleep(interval)

    def _audio_sender_loop(self):
        """Sender loop: wait for a live link, (re)handshake, then ship the newest PCM frame."""
        interval = float(MEDIA_CONFIG.get('audio_interval', 0.05))
        send_timeout = float(MEDIA_CONFIG.get('video_send_timeout', 0.05))

        def drain_stale():
            # Throw away frames that accumulated while we could not send.
            try:
                while not self.audio_frame_queue.empty():
                    self.audio_frame_queue.get_nowait()
            except Exception:
                pass

        while self.audio_streaming:
            try:
                if not (self.audio_conn and self.audio_conn.is_connected()):
                    # Link is down: flush the queue and force a fresh handshake later.
                    drain_stale()
                    self._audio_handshake_done = False
                    time.sleep(0.5)
                    continue

                if not self._audio_handshake_done:
                    if not self._send_audio_handshake(timeout=5):
                        time.sleep(1.0)
                        continue
                    self._audio_handshake_done = True

                try:
                    data = self.audio_frame_queue.get(timeout=interval)
                    # Skip ahead to the most recent frame if more arrived meanwhile.
                    while not self.audio_frame_queue.empty():
                        try:
                            data = self.audio_frame_queue.get_nowait()
                        except Exception:
                            break
                except Exception:
                    continue

                sent = self.audio_conn.send_binary_quick(data, timeout=send_timeout) if self.audio_conn else False
                if not sent:
                    self.logger.debug("音频帧发送失败或超时，已丢弃")
            except Exception as e:
                self.logger.error(f"音频发送异常: {e}")
                time.sleep(0.5)

    def _send_video_handshake(self, timeout=10):
        """Send the video-stream handshake (template from 音频消息.json, type=video).

        Attaches stream parameters (size/interval/quality) so the server knows
        the incoming format, then waits up to ``timeout`` seconds for a JSON
        acknowledgement.

        Args:
            timeout: Seconds to wait for the server's JSON reply.

        Returns:
            bool: True when the server acknowledges with status == 'success'.
        """
        msg = self._load_audio_message_template() or {}
        msg['type'] = 'video'
        msg['role'] = 'hardware'
        # Attach stream parameters so the server knows target size/quality (if supported).
        msg['video'] = {
            'width': MEDIA_CONFIG.get('video_width', MEDIA_CONFIG.get('camera_width', 640)),
            'height': MEDIA_CONFIG.get('video_height', MEDIA_CONFIG.get('camera_height', 480)),
            'interval': MEDIA_CONFIG.get('video_interval', 0.1),
            'jpeg_quality': MEDIA_CONFIG.get('video_jpeg_quality', 60),
            'best_effort': True
        }

        if not self.conn.send_json(msg):
            self.logger.error("视频握手消息发送失败")
            return False

        self.logger.info("等待服务端握手确认...")
        resp = self.conn.receive_json(timeout=timeout)
        if not isinstance(resp, dict):
            self.logger.error("服务端未返回有效JSON或超时，取消视频传输")
            return False

        # Any reply carrying status == 'success' is accepted (this covers the
        # type == 'confirmation' case too; the old separate 'confirmation'
        # branch was unreachable dead code and has been removed).
        if resp.get('status') == 'success':
            self.logger.info("握手成功，开始传输视频字节流")
            return True

        self.logger.error(f"握手失败，服务端响应: {resp}")
        return False

    def start_video_stream(self):
        """Start the video byte-stream transfer (only after a successful handshake).

        Performs the handshake first, then spins up the capture, sender and
        inference threads. Model/ultrasonic initialization is best-effort.

        Returns:
            bool: True when streaming is up (or was already running).
        """
        if not self._is_camera_available():
            self.logger.error("摄像头不可用，无法启动视频传输")
            return False
        if self.streaming:
            self.logger.info("视频字节流已在运行")
            return True
        if not self._send_video_handshake():
            return False

        self.streaming = True
        # BUGFIX: mark the handshake as done BEFORE starting the sender thread.
        # Previously the flag was set only after the threads started, so the
        # sender loop could observe it as False and perform a redundant second
        # handshake immediately after the successful one above.
        self._handshake_done = True

        # Initialize the model manager (best effort — streaming works without it).
        try:
            self.models = ModelManager()
            self.models.init()
        except Exception as e:
            self.logger.warning(f"模型初始化失败: {e}")
        try:
            self._init_ultrasonic()
        except Exception:
            pass

        # Start the capture + sender thread pair.
        self.video_capture_thread = threading.Thread(target=self._video_capture_loop, name="VideoCaptureLoop", daemon=True)
        self.video_sender_thread = threading.Thread(target=self._video_sender_loop, name="VideoSenderLoop", daemon=True)
        self.video_capture_thread.start()
        self.video_sender_thread.start()
        # Start the inference thread.
        self.video_infer_thread = threading.Thread(target=self._video_inference_loop, name="VideoInferenceLoop", daemon=True)
        self.video_infer_thread.start()
        self.logger.info("视频字节流传输已启动")
        return True

    def stop_video_stream(self):
        """停止视频字节流传输"""
        self.streaming = False
        self._handshake_done = False
        # 停止发送线程
        if self.video_sender_thread and self.video_sender_thread.is_alive():
            try:
                self.video_sender_thread.join(timeout=5)
            except Exception:
                pass
        self.video_sender_thread = None
        # 停止采集线程
        if self.video_capture_thread and self.video_capture_thread.is_alive():
            try:
                self.video_capture_thread.join(timeout=5)
            except Exception:
                pass
        self.video_capture_thread = None
        # 清空队列
        try:
            while not self.video_frame_queue.empty():
                self.video_frame_queue.get_nowait()
        except Exception:
            pass
        try:
            while not self.video_raw_frame_queue.empty():
                self.video_raw_frame_queue.get_nowait()
        except Exception:
            pass
        # 停止推理线程
        if self.video_infer_thread and self.video_infer_thread.is_alive():
            try:
                self.video_infer_thread.join(timeout=5)
            except Exception:
                pass
        self.video_infer_thread = None
        self._cleanup_ultrasonic()
        self.logger.info("视频字节流传输已停止")

    def _video_capture_loop(self):
        """Capture/encode loop: grab frame -> downscale -> JPEG encode -> queue (newest only)."""
        interval = MEDIA_CONFIG.get('video_interval', 0.1)
        quality = int(MEDIA_CONFIG.get('video_jpeg_quality', 60))
        target_w = MEDIA_CONFIG.get('video_width', None)
        target_h = MEDIA_CONFIG.get('video_height', None)

        def offer_latest(slot, item):
            # Single-slot queue: evict the stale entry so only the newest survives.
            try:
                slot.put_nowait(item)
            except queue.Full:
                try:
                    slot.get_nowait()
                except Exception:
                    pass
                try:
                    slot.put_nowait(item)
                except Exception:
                    pass

        while self.streaming:
            try:
                tick = time.perf_counter()
                ret, frame = self.capture_image()
                if not ret or frame is None:
                    self.logger.warning("视频帧捕获失败")
                else:
                    # Downscale for streaming only (photo capture keeps full resolution).
                    try:
                        if target_w and target_h:
                            width, height = int(target_w), int(target_h)
                            if frame.shape[1] != width or frame.shape[0] != height:
                                frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
                    except Exception as resize_e:
                        self.logger.debug(f"视频帧缩放失败，保持原始分辨率: {resize_e}")

                    ok, buffer = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)])
                    if not ok:
                        self.logger.warning("视频帧编码失败")
                    else:
                        offer_latest(self.video_frame_queue, buffer.tobytes())
                        # Mirror the (possibly downscaled) raw frame to the inference queue.
                        offer_latest(self.video_raw_frame_queue, frame.copy())

                # Pace against the target interval, compensating for capture/encode time.
                elapsed = time.perf_counter() - tick
                remaining = interval - elapsed
                if remaining > 0:
                    time.sleep(remaining)
                elif elapsed > interval * 2:
                    self.logger.debug(f"采集编码耗时较长: {elapsed:.3f}s，超过目标间隔{interval:.3f}s")
            except Exception as e:
                self.logger.error(f"视频采集异常: {e}")
                time.sleep(0.5)

    def _video_sender_loop(self):
        """Sender loop: manage connection/handshake state, pull the freshest frame, send it."""
        interval = MEDIA_CONFIG.get('video_interval', 0.1)
        while self.streaming:
            try:
                if not self.conn.is_connected():
                    if self._handshake_done:
                        self.logger.warning("连接断开，暂停视频发送")
                    # Flush both queues so stale frames don't pile up while offline.
                    try:
                        while not self.video_frame_queue.empty():
                            self.video_frame_queue.get_nowait()
                        while not self.video_raw_frame_queue.empty():
                            self.video_raw_frame_queue.get_nowait()
                    except Exception:
                        pass
                    # Require a fresh handshake once the link comes back.
                    self._handshake_done = False
                    time.sleep(1.0)
                    continue

                # Reconnected but not yet handshaken: redo the handshake once.
                if not self._handshake_done:
                    if not self._send_video_handshake(timeout=5):
                        time.sleep(2.0)
                        continue
                    self._handshake_done = True
                    self.logger.info("重连握手成功，恢复视频发送")

                # Pull the newest encoded frame, dropping older ones for low latency.
                try:
                    payload = self.video_frame_queue.get(timeout=interval)
                except Exception:
                    # Timed out with no new frame; go around again.
                    continue
                while not self.video_frame_queue.empty():
                    try:
                        payload = self.video_frame_queue.get_nowait()
                    except Exception:
                        break

                # Ship via the existing send API so the server keeps receiving frames.
                if not self.conn.send_binary_quick(payload, timeout=float(MEDIA_CONFIG.get('video_send_timeout', 0.05))):
                    self.logger.warning("视频帧发送失败")
            except Exception as e:
                self.logger.error(f"视频发送异常: {e}")
                time.sleep(0.5)

    def _video_inference_loop(self):
        """Inference loop: newest raw frame -> dual-model inference -> text result.

        Runs while ``self.streaming`` is True. Pulls the latest raw frame from
        ``video_raw_frame_queue``, runs both models via ``self.models``
        (road analysis + classification), logs a combined text summary, and —
        when risk conditions are met — fires a local espeak announcement,
        rate-limited by ``self._alert_cooldown``.
        """
        interval = float(MEDIA_CONFIG.get('analysis_interval', 0.2))
        while self.streaming:
            try:
                # Fetch the newest raw frame, draining any stale ones behind it.
                try:
                    frame = self.video_raw_frame_queue.get(timeout=interval)
                    while not self.video_raw_frame_queue.empty():
                        try:
                            frame = self.video_raw_frame_queue.get_nowait()
                        except Exception:
                            break
                except Exception:
                    # No new frame yet; wait one interval and retry.
                    time.sleep(interval)
                    continue

                bgr_frame = frame

                # Run both models (best effort; failures only logged at debug level).
                results = {}
                try:
                    if self.models:
                        results = self.models.infer_both(bgr_frame)
                except Exception as ie:
                    self.logger.debug(f"模型推理失败: {ie}")

                if results:
                    # Build the combined text output plus a simple steering hint.
                    texts = []
                    road = results.get('road')
                    cls = results.get('classification')

                    direction_hint = None
                    offset = None
                    if road:
                        ratio = float(road.get('road_ratio', 0.0))
                        offset = float(road.get('center_offset', 0.0))
                        texts.append(f"道路占比{ratio:.2f}, 中心偏移{offset:.2f}")
                        # Offer a steering hint only when the offset exceeds the threshold.
                        try:
                            thr = float(MEDIA_CONFIG.get('road_offset_threshold', 0.2))
                            if abs(offset) >= thr:
                                direction_hint = "建议向右微调" if offset > 0 else "建议向左微调"
                        except Exception:
                            pass

                    if cls:
                        name = str(cls.get('class_name', ''))
                        conf = float(cls.get('confidence', 0.0))
                        risk = str(cls.get('risk_level', 'low'))
                        texts.append(f"检测到{name}(置信度{conf:.2f}, 风险{risk})")

                    content = '；'.join(texts) if texts else ''
                    if direction_hint:
                        content = f"{content}；{direction_hint}" if content else direction_hint

                    # Rule: announce only on high risk, over-threshold road offset,
                    # or a detected obstacle.
                    classification_should_speak = False
                    offset_should_speak = False
                    try:
                        # Road-offset threshold check.
                        if offset is not None:
                            thr = float(MEDIA_CONFIG.get('road_offset_threshold', 0.2))
                            if abs(offset) >= thr:
                                offset_should_speak = True
                        # Classification: any non-"正常" class; optionally only high risk.
                        if cls:
                            risk = str(cls.get('risk_level', 'low'))
                            name = str(cls.get('class_name', ''))
                            high_only = bool(MEDIA_CONFIG.get('classification_speak_high_risk_only', False))
                            if name and name != '正常':
                                if high_only:
                                    classification_should_speak = (risk.lower() == 'high')
                                else:
                                    classification_should_speak = True
                    except Exception:
                        pass

                    if content:
                        # Always print the full result to the terminal/log.
                        self.logger.info(f"模型结果: {content}")
                        # Speaker announcement: only when triggered, and throttled.
                        if classification_should_speak or offset_should_speak:
                            now = time.time()
                            if now - self._last_alert_ts >= self._alert_cooldown:
                                self._last_alert_ts = now
                                # Obstacle announcements take priority over offset ones.
                                speak_text = None
                                if classification_should_speak and cls:
                                    obstacle = str(cls.get('class_name', '障碍物'))
                                    # Latest ultrasonic distance in meters, if the sensor produced one.
                                    dist_m = self._ultra_latest_m
                                    if dist_m is not None:
                                        speak_text = f"前方{dist_m:.1f}米有{obstacle}，请小心通过"
                                    else:
                                        speak_text = f"前方有{obstacle}，请小心通过"
                                elif offset_should_speak:
                                    if direction_hint:
                                        speak_text = f"道路偏移较大，{direction_hint}"
                                    else:
                                        speak_text = "道路偏移较大，请调整方向"
                                # Speak on a daemon thread so playback never blocks inference.
                                threading.Thread(target=self._speak_text_espeak, args=(speak_text,), daemon=True).start()

                time.sleep(interval)
            except Exception as e:
                self.logger.error(f"推理线程异常: {e}")
                time.sleep(0.5)

    def _speak_text_espeak(self, text: str, lang: str = 'zh'):
        """使用 espeak 播报文本，不依赖服务器。"""
        try:
            subprocess.run(['espeak', '-v', lang, text], timeout=10)
        except FileNotFoundError:
            self.logger.warning("espeak 未安装，无法播报语音。请安装: sudo apt-get install espeak")
        except Exception as e:
            self.logger.debug(f"espeak 播报失败: {e}")