# -*- coding: utf-8 -*-
import argparse
import time
from pathlib import Path
import threading
import cv2
import torch
import numpy as np
import pyttsx3
import RPi.GPIO as GPIO
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
from threading import Thread
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, non_max_suppression, scale_coords, xyxy2xywh
from utils.plots import plot_one_box
from utils.torch_utils import select_device
from stereo.dianyuntu_yolo import preprocess, undistortion, getRectifyTransform, draw_line, rectifyImage, \
    stereoMatchSGBM
from stereo import stereoconfig

# Ultrasonic sensor configuration (BCM pin numbering — see GPIO.setmode below)
TRIG_PIN = 17
ECHO_PIN = 27
ULTRASONIC_THRESHOLD = 50  # obstacle alert threshold, in centimetres


# TensorRT engine wrapper: deserializes a prebuilt .engine file and exposes it
# as a callable that mimics a PyTorch model's forward pass.
class TRTEngine:
    def __init__(self, engine_path):
        """Load a serialized TensorRT engine and allocate I/O buffers.

        Allocates page-locked host memory plus device memory for every engine
        binding so host<->device copies can be asynchronous.

        NOTE(review): assumes the engine has exactly one input and one output
        binding — a later binding of the same kind silently overwrites the
        earlier buffer references. Confirm against the exported engine.
        """
        self.logger = trt.Logger(trt.Logger.WARNING)
        with open(engine_path, 'rb') as f, trt.Runtime(self.logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        self.context = self.engine.create_execution_context()
        self.stream = cuda.Stream()

        # Allocate input/output memory for each binding.
        self.bindings = []
        for binding in self.engine:
            # Element count of the binding shape times the max batch size.
            size = trt.volume(self.engine.get_binding_shape(binding)) * self.engine.max_batch_size
            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)  # page-locked for async copies
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            self.bindings.append(int(device_mem))

            if self.engine.binding_is_input(binding):
                self.input_host = host_mem
                self.input_device = device_mem
                self.input_shape = self.engine.get_binding_shape(binding)
            else:
                self.output_host = host_mem
                self.output_device = device_mem
                self.output_shape = self.engine.get_binding_shape(binding)

    def __call__(self, img):
        """Run inference on one preprocessed image.

        Accepts a torch.Tensor (moved to CPU) or a numpy array; returns the
        raw network output as a torch.Tensor shaped like the engine's output
        binding.
        """
        # Flatten the input into the pre-allocated page-locked host buffer.
        img = img.cpu().numpy() if isinstance(img, torch.Tensor) else img
        np.copyto(self.input_host, img.ravel())

        # Async H2D copy -> execute -> async D2H copy, then block on the stream.
        cuda.memcpy_htod_async(self.input_device, self.input_host, self.stream)
        self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
        cuda.memcpy_dtoh_async(self.output_host, self.output_device, self.stream)
        self.stream.synchronize()

        # Re-wrap the flat host buffer with the engine's output shape.
        output = torch.tensor(self.output_host).reshape(self.output_shape)
        return output


# Main blind-assistance system: detection + stereo ranging + ultrasonic alerts.
class BlindAssistanceSystem:
    """Assistance system for visually-impaired users.

    Combines YOLOv5 object detection (PyTorch weights or a TensorRT engine),
    stereo-vision distance estimation, an ultrasonic obstacle sensor and
    text-to-speech announcements.
    """

    def __init__(self, opt):
        self.opt = opt
        self.setup_ultrasonic()
        self.setup_tts()
        self.setup_model()
        self.setup_stereo()

        # Speech state: rate-limit timestamp plus a lock serialising TTS access.
        self.last_speak_time = 0
        self.speak_lock = threading.Lock()

        # Detection class name -> spoken announcement (keys must match model names).
        self.label_to_speak = {
            'sidewalk points': '检测到提示盲道',
            "sidewalk of the blindness": '检测到盲道',
            "ZEC": '斑马线',
            "RL": '红灯',
            "GL": '绿灯',
            "car": "车辆",
        }

    def setup_ultrasonic(self):
        """Configure the HC-SR04-style trigger/echo pins (BCM numbering)."""
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(TRIG_PIN, GPIO.OUT)
        GPIO.setup(ECHO_PIN, GPIO.IN)

    def setup_tts(self):
        """Initialise the offline text-to-speech engine."""
        self.engine = pyttsx3.init()
        self.engine.setProperty('rate', 150)  # speech rate (words per minute)

    def setup_model(self):
        """Load the detection model (TensorRT .engine or PyTorch weights)."""
        self.device = select_device(self.opt.device)

        if self.opt.weights.endswith('.engine'):
            # TensorRT engine: precision is baked into the engine itself.
            self.model = TRTEngine(self.opt.weights)
            self.half = False
        else:
            # Original PyTorch model.
            self.model = attempt_load(self.opt.weights, map_location=self.device)
            # FIX: the previous condition excluded '.pt' weights, so FP16 was
            # never actually enabled; use half precision whenever on GPU.
            self.half = self.device.type != 'cpu'
            if self.half:
                self.model.half()

        # Class names and per-class colours. FIX: TRTEngine has no `.names`
        # attribute, which used to raise AttributeError for .engine weights —
        # fall back to numeric labels so detection still runs (the speech
        # mapping will then simply not match; supply real names if needed).
        base = self.model.module if hasattr(self.model, 'module') else self.model
        self.names = getattr(base, 'names', [str(i) for i in range(1000)])
        self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.names]

    def setup_stereo(self):
        """Pre-compute stereo rectification maps and the reprojection matrix Q."""
        self.config = stereoconfig.stereoCamera()
        # Per-eye resolution expected from the stereo rig.
        height, width = 720, 1280
        self.map1x, self.map1y, self.map2x, self.map2y, self.Q = getRectifyTransform(height, width, self.config)

    def measure_distance(self):
        """Measure distance with the ultrasonic sensor.

        Returns:
            Distance in centimetres clamped to [0, 500], or -1 on error/timeout.
        """
        try:
            # 10 µs trigger pulse starts one measurement.
            GPIO.output(TRIG_PIN, True)
            time.sleep(0.00001)
            GPIO.output(TRIG_PIN, False)

            timeout = 0.1  # seconds — far beyond the sensor's maximum range

            # FIX: the timeout anchor used to be reset on every iteration, so
            # the loops could spin forever if the echo line stuck; anchor the
            # deadline once per phase instead.
            deadline = time.time() + timeout
            pulse_start = time.time()
            while GPIO.input(ECHO_PIN) == 0:
                if time.time() > deadline:
                    raise TimeoutError("Echo start timeout")
                pulse_start = time.time()

            deadline = time.time() + timeout
            pulse_end = time.time()
            while GPIO.input(ECHO_PIN) == 1:
                if time.time() > deadline:
                    raise TimeoutError("Echo end timeout")
                pulse_end = time.time()

            # Echo duration * speed of sound (34300 cm/s), halved (round trip).
            distance = (pulse_end - pulse_start) * 34300 / 2
            return max(0, min(distance, 500))  # clamp to 0-500 cm
        except Exception as e:
            print(f"Ultrasonic error: {str(e)}")
            return -1

    def ultrasonic_obstacle_detection(self):
        """Daemon loop: announce obstacles closer than ULTRASONIC_THRESHOLD."""
        while True:
            distance = self.measure_distance()
            if 0 < distance < ULTRASONIC_THRESHOLD:
                self.speak_async(f"前方 {distance:.1f} 厘米处有障碍物")
            time.sleep(0.5)

    def stereo_processing(self, im0, xyxy):
        """Estimate the 3D position of a detection via stereo matching.

        Args:
            im0: side-by-side stereo frame (left half | right half).
            xyxy: bounding box corners in frame coordinates.

        Returns:
            (x, y, z, euclidean distance), all in centimetres.
        """
        height_0, width_0 = im0.shape[0:2]
        iml = im0[0:int(height_0), 0:int(width_0 / 2)]
        imr = im0[0:int(height_0), int(width_0 / 2):int(width_0)]

        # Undistort, preprocess and rectify both views. (A redundant
        # rectification of the raw halves whose result was never used has
        # been removed.)
        iml_ = undistortion(iml, self.config.cam_matrix_left, self.config.distortion_l)
        imr_ = undistortion(imr, self.config.cam_matrix_right, self.config.distortion_r)
        iml_, imr_ = preprocess(iml_, imr_)
        iml_rectified_l, imr_rectified_r = rectifyImage(iml_, imr_, self.map1x, self.map1y, self.map2x, self.map2y)

        # SGBM disparity -> 3D point cloud via the rectification Q matrix.
        disp, _ = stereoMatchSGBM(iml_rectified_l, imr_rectified_r, True)
        points_3d = cv2.reprojectImageTo3D(disp, self.Q)

        # Sample the point cloud at the box centre; /10 presumably converts
        # mm -> cm (depends on stereoconfig calibration units — TODO confirm).
        # NOTE(review): boxes detected in the right half of the frame index
        # past the left-view point cloud — confirm detections are restricted
        # to the left view.
        cx = (xyxy[0] + xyxy[2]) / 2
        cy = (xyxy[1] + xyxy[3]) / 2
        x3d = points_3d[int(cy), int(cx), 0] / 10
        y3d = points_3d[int(cy), int(cx), 1] / 10
        z3d = points_3d[int(cy), int(cx), 2] / 10
        distance = np.sqrt(x3d ** 2 + y3d ** 2 + z3d ** 2)

        return x3d, y3d, z3d, distance

    def speak_async(self, text):
        """Speak *text* on a background thread, rate-limited to 1 utterance/s."""
        current_time = time.time()
        if current_time - self.last_speak_time < 1.0:  # throttle repeats
            return
        # FIX: record the timestamp at scheduling time. The old code updated
        # it only after playback finished, so every call made within that
        # window spawned its own thread and queued overlapping speech.
        self.last_speak_time = current_time

        def speak():
            # The lock serialises access to the single pyttsx3 engine.
            with self.speak_lock:
                try:
                    self.engine.say(text)
                    self.engine.runAndWait()
                except Exception as e:
                    print(f"语音播报错误: {str(e)}")

        # Daemon thread so a pending utterance cannot block process exit.
        Thread(target=speak, daemon=True).start()

    def detect(self):
        """Main loop: detection, stereo ranging, overlay drawing and speech."""
        source, view_img, imgsz = self.opt.source, self.opt.view_img, self.opt.img_size

        # Stream-like sources (the default '0' is a webcam index).
        webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
            ('rtsp://', 'rtmp://', 'http://', 'https://'))

        # Background ultrasonic obstacle alerts.
        ultrasonic_thread = Thread(target=self.ultrasonic_obstacle_detection, daemon=True)
        ultrasonic_thread.start()

        # FIX: `webcam` was computed but never used — every source went to
        # LoadImages, so the default webcam source '0' failed to open.
        if webcam:
            dataset = LoadStreams(source, img_size=imgsz)
        else:
            dataset = LoadImages(source, img_size=imgsz)

        # Inference loop.
        for path, img, im0s, vid_cap in dataset:
            img = torch.from_numpy(img).to(self.device)
            img = img.half() if self.half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)

            # Inference: the TensorRT path returns the raw prediction tensor.
            if self.opt.weights.endswith('.engine'):
                pred = self.model(img)
            else:
                pred = self.model(img, augment=self.opt.augment)[0]

            # NMS.
            pred = non_max_suppression(pred, self.opt.conf_thres, self.opt.iou_thres)

            # Per-image detections.
            for i, det in enumerate(pred):
                if webcam:
                    # LoadStreams yields one frame per stream in a list.
                    p, im0 = path[i], im0s[i].copy()
                else:
                    p, im0 = path, im0s.copy()

                if len(det):
                    # Rescale boxes from letterboxed size to the original frame.
                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                    for *xyxy, conf, cls in reversed(det):
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=self.colors[int(cls)], line_thickness=2)

                        # Stereo ranging for this detection.
                        x3d, y3d, z3d, distance = self.stereo_processing(im0, xyxy)
                        distance_text = f'{distance:.1f}cm'
                        cv2.putText(im0, distance_text, (int(xyxy[0]), int(xyxy[1]) - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

                        # Announce known classes with their distance.
                        cls_name = self.names[int(cls)]
                        if cls_name in self.label_to_speak:
                            self.speak_async(f"{self.label_to_speak[cls_name]} 距离 {distance_text}")

                # Display.
                if view_img:
                    cv2.imshow(str(p), im0)
                    cv2.waitKey(1)

    def cleanup(self):
        """Release GPIO pins, OpenCV windows and the TTS engine."""
        GPIO.cleanup()
        cv2.destroyAllWindows()
        self.engine.stop()


def parse_opt(argv=None):
    """Parse command-line options.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward-compatible — existing callers pass nothing).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='模型路径 (.pt/.engine)')
    parser.add_argument('--source', type=str, default='0', help='数据源')
    parser.add_argument('--img-size', type=int, default=640, help='推理尺寸 (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='置信度阈值')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU阈值')
    parser.add_argument('--device', default='', help='cuda设备, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='显示结果')
    parser.add_argument('--save-img', action='store_true', help='保存结果')
    parser.add_argument('--augment', action='store_true', help='增强推理')
    return parser.parse_args(argv)

def main():
    """Entry point: build the assistance system, run detection, always clean up."""
    options = parse_opt()
    assistance = BlindAssistanceSystem(options)
    try:
        assistance.detect()
    finally:
        # Release GPIO/TTS/window resources even if detection raises.
        assistance.cleanup()


if __name__ == "__main__":
    main()