from Vission.codes.message_queue import speech_message, SpeechPriorityQueue, Priority, Subpriority
from Application.utils import tts_speak 
from Hardware.direction_enum import Direction  
from onnx_detect import YOLOv8Detector
import logging
import cv2
import time

# Index of the confidence score inside a detection row; detections are
# indexable sequences where [4] is confidence and [5] is the class id
# (see detect_position / detect_roadstate below).
CONFIDENCE_INDEX = 4

# Log DEBUG and above both to a log file and to the console.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s [%(levelname)s] %(message)s",
    handlers=[logging.FileHandler("/home/elf/project_code2/app.log"), logging.StreamHandler()]
)

def open_camera(device_index=21, frame_width=640, frame_height=480):
    """Open and configure the video capture device.

    Generalized from hard-coded values: the device index and frame size are
    now parameters whose defaults preserve the original behavior.

    Args:
        device_index: OpenCV capture device index (default 21, as before).
        frame_width: requested capture width in pixels.
        frame_height: requested capture height in pixels.

    Returns:
        The opened ``cv2.VideoCapture`` object, or None on failure (after
        logging the error and speaking a warning to the user).
    """
    cap = cv2.VideoCapture(device_index)
    if not cap.isOpened():
        logging.error("[camera] 打开摄像头失败!")
        tts_speak("无法连接摄像头,请检查设备连接")
        return None
    logging.info("[camera] 摄像头打开成功")
    # Request the capture resolution; drivers may silently ignore this.
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
    return cap

def get_img(cap):
    """Grab a single frame from *cap*.

    Returns the frame on a successful read, otherwise None.
    """
    ok, frame = cap.read()
    return frame if ok else None
    
def release_camera(cap):
    """Release the capture device *cap* and close all OpenCV display windows."""
    cap.release()
    cv2.destroyAllWindows()

def detect_position(mission, img):
    """位置检测 (position detection).

    Detection classes:
    ['zebra_crossing', 'crossroad', 'road', 'street', 'stairs', 'left t-junction', 't-junction']

    Runs the position detector on *img*, optionally enriches the announcement
    with the road state, pushes a speech message onto the mission's queue and
    returns the detected position label (or None when the mission argument is
    invalid or nothing was detected).
    """
    from Application.application import mission as mission_class  # 延迟导入 (deferred to avoid a circular import)
    if not isinstance(mission, mission_class):
        logging.error("[detect_position] mission 参数类型错误")
        return None

    detecter_position = mission.position_detecter
    detecter_roadstate = mission.roadstate_detecter
    message_queue = mission.message_queue
    position_list = ['zebra_crossing', 'crossroad', 'road', 'street', 'stairs', 'left t-junction', 't-junction']

    detections = detecter_position.detect(img)
    if not detections:
        logging.info("[position detect] 未识别当前位置")
        return None

    # Keep only the highest-confidence detection.
    best_det = max(detections, key=lambda det: det[CONFIDENCE_INDEX])
    cls_id = best_det[5]
    position = position_list[cls_id]

    road_state = detect_roadstate(img=img, detecter=detecter_roadstate)

    # BUG FIX: the original assigned a fallback message when road_state was
    # None and then unconditionally overwrote it, announcing "道路None"; the
    # street/road fallbacks were also copy-pasted crossroad text.
    if position in ['zebra_crossing', 'crossroad', 'left t-junction', 't-junction']:
        if road_state is None:
            message = "当前处于路口附近上,注意减速慢行，正在着重检测左右方车辆情况"
        else:
            message = f"当前处于路口附近上,道路{road_state},注意减速慢行，正在着重检测左右方车辆情况"
    elif position == "street":
        if road_state is None:
            message = "当前处于街道上,正在着重检测沿途行人车辆及障碍物"
        else:
            message = f"当前处于街道上,道路{road_state},正在着重检测沿途行人车辆及障碍物"
    elif position == "road":
        if road_state is None:
            message = "当前处于马路上，注意靠边行走，正在着重检测车辆来往状况"
        else:
            message = f"当前处于马路上，道路{road_state},注意靠边行走，正在着重检测车辆来往状况"
    else:  # position == "stairs" — every other class is handled above
        message = "附近有台阶，注意谨慎行走"

    logging.info(f"[position detect] 播报:{message}")
    message_queue.push(speech_message(priority=Priority.Text, subpriority=Subpriority.A, message=message))
    return position

def _wait_for_vehicles_to_pass(detecter, cap, message_queue, car_people_list, blocked_labels, waiting_log):
    """Poll the camera until no *blocked_labels* remain (or a 30 s timeout),
    then announce once that the road is clear.

    Fixes two defects in the original inline wait loops:
    - the "clear" check ran after each individual detection, so the road could
      be declared clear while later detections in the same frame still showed
      a blocking vehicle;
    - ``break`` only exited the inner ``for`` loop, so the clear announcement
      was repeated on every polling cycle until the timeout elapsed.
    """
    TIME_OUT = 30
    start_time = time.time()
    while time.time() - start_time <= TIME_OUT:
        logging.info(waiting_log)
        img_ = get_img(cap=cap)
        if img_ is None:
            continue
        detections = detecter.detect(image=img_)
        labels = [car_people_list[det[5]] for det in detections]
        # Evaluate the whole frame: clear only when no blocking label remains.
        if not any(label in blocked_labels for label in labels):
            break
        time.sleep(2)
    message = "车辆通过完毕，可恢复通行"
    logging.info(f"[detect_car_people] 播报:{message}")
    message_queue.push(speech_message(priority=Priority.Notice, subpriority=Subpriority.A, message=message))


def detect_car_people(mission, img):
    """车辆及行人检测 (vehicle and pedestrian detection).

    Detection classes:
    ["car_left_moving","car_right_moving", "car_front_approaching","car_front_moving_left",
    "car_front_moving_right","car_rear_approaching","pedestrian_front","pedestrian_right","pedestrian_left"]

    Pushes the appropriate speech message onto the mission's queue and, when a
    vehicle blocks the path, polls the camera until it has passed. Returns 0
    when an announcement was made, None otherwise.
    """
    from Application.application import mission as mission_class  # 延迟导入 (deferred to avoid a circular import)
    if not isinstance(mission, mission_class):
        logging.error("[detect_car_people] mission 参数类型错误")
        return

    car_people_list = ["car_left_moving", "car_right_moving", "car_front_approaching", "car_front_moving_left",
                       "car_front_moving_right", "car_rear_approaching", "pedestrian_front", "pedestrian_right", "pedestrian_left"]

    detecter = mission.car_people_detecter
    message_queue = mission.message_queue
    cap = mission.cap

    detections = detecter.detect(image=img)
    if not detections:
        return

    tag = [car_people_list[det[5]] for det in detections]

    # A vehicle is crossing in front: stop the user and wait until it passes.
    if "car_front_moving_left" in tag or "car_front_moving_right" in tag:
        if mission.current_direction == Direction.FRONT:
            message = "前方有车辆经过，请止步"
            logging.info(f"[detect_car_people] 播报:{message}")
            message_queue.push(speech_message(priority=Priority.Alert, subpriority=Subpriority.A, message=message))
            _wait_for_vehicles_to_pass(
                detecter, cap, message_queue, car_people_list,
                blocked_labels=("car_front_moving_left", "car_front_moving_right"),
                waiting_log="[detect_car_people] 等待前方车辆通过中》》》",
            )
        return 0

    # A vehicle is approaching head-on or from behind.
    if "car_front_approaching" in tag or "car_rear_approaching" in tag:
        dirc = mission.current_direction
        dirc_name = mission.current_direction.direction_name
        if dirc in (Direction.FRONT, Direction.BACK):
            message = f"{dirc_name}方有车辆驶来，请止步"
            logging.info(f"[detect_car_people] 播报:{message}")
            message_queue.push(speech_message(priority=Priority.Alert, subpriority=Subpriority.A, message=message))
            _wait_for_vehicles_to_pass(
                detecter, cap, message_queue, car_people_list,
                blocked_labels=("car_front_approaching", "car_rear_approaching"),
                waiting_log=f"等待{dirc_name}方车辆通过中",
            )
        if dirc in (Direction.LEFT, Direction.RIGHT):
            # Crossing traffic from the side: urge the user to finish quickly.
            message = f"{dirc_name}方有车辆驶来，请尽快通过"
            logging.info(f"[detect_car_people] 播报:{message}")
            message_queue.push(speech_message(priority=Priority.Alert, subpriority=Subpriority.B, message=message))
        return 0

    # Pedestrian warnings — announce only, no waiting.
    if "pedestrian_front" in tag:
        message = "前方有行人，注意减速缓行"
        logging.info(f"[detect_car_people] 播报:{message}")
        message_queue.push(speech_message(priority=Priority.Text, subpriority=Subpriority.A, message=message))
        return 0
    if "pedestrian_left" in tag:
        message = "左方有行人，小心碰撞"
        logging.info(f"[detect_car_people] 播报:{message}")
        message_queue.push(speech_message(priority=Priority.Text, subpriority=Subpriority.B, message=message))
        return 0
    if "pedestrian_right" in tag:
        message = "右方有行人，小心碰撞"
        logging.info(f"[detect_car_people] 播报:{message}")
        message_queue.push(speech_message(priority=Priority.Text, subpriority=Subpriority.B, message=message))
        return 0

def detect_static_barrier(img, detecter: YOLOv8Detector, message_queue: SpeechPriorityQueue):
    """静态障碍物检测 (static obstacle detection).

    Detection classes:
    ["Static-front", "Static-right", "Safe-path-left", "Static-left",
     "Safe-path-front", "Safe-path-right"]

    Announces each detected obstacle side, steering the user toward the
    detected safe path when one is available. Returns None.
    """
    static_barrier_list = ["Static-front", "Static-right", "Safe-path-left", "Static-left", "Safe-path-front", "Safe-path-right"]
    detections = detecter.detect(image=img)
    if not detections:
        return

    tag = []
    safe_direction = None
    for det in detections:
        label = static_barrier_list[det[5]]
        if "Safe-path" in label:
            # Remember which side a safe path was seen on (last one wins).
            if "Safe-path-left" in label:
                safe_direction = "左"
            elif "Safe-path-right" in label:
                safe_direction = "右"
            else:
                safe_direction = "中间"
        else:
            tag.append(label)

    # (obstacle label, message when a safe direction is known, fallback message)
    # BUG FIX: the original "Static-left" fallback said 右侧 (right) instead of 左侧 (left).
    announcements = [
        ("Static-front", "前方有障碍物，注意靠{}行走", "前方有障碍物，注意小心避让"),
        ("Static-left", "左侧有障碍物，注意靠{}行走", "左侧有障碍物，注意小心避让"),
        ("Static-right", "右侧有障碍物，注意靠{}行走", "右侧有障碍物，注意小心避让"),
    ]
    for label, guided, fallback in announcements:
        if label in tag:
            message = guided.format(safe_direction) if safe_direction is not None else fallback
            logging.info(f"[detect_static_barrier] 播报:{message}")
            message_queue.push(speech_message(priority=Priority.Notice, subpriority=Subpriority.A, message=message))

def detect_roadstate(img, detecter: YOLOv8Detector):
    """Classify the road state from *img*.

    Returns "narrow", "wide" or "complex" based on the highest-confidence
    detection, or None when nothing was detected.
    """
    road_states = ("narrow", "wide", "complex")
    detections = detecter.detect(image=img)
    if not detections:
        return None
    # Pick the single most confident detection and map its class id to a label.
    top = max(detections, key=lambda det: det[CONFIDENCE_INDEX])
    return road_states[top[5]]
