import argparse
import os
import platform
import sys
from pathlib import Path
import rospy
from sensor_msgs.msg import Range
import pandas as pd
from PIL import Image
import cv2
import numpy as np
import torch
import time 
import signal
import sys
import threading
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
    LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr,
    increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh
)
from utils.torch_utils import select_device, smart_inference_mode

# Default f_dx value (per-pixel scale factor used until a ROS reading arrives)
DEFAULT_F_DX = 0.15
DISTANCE_UPDATE_INTERVAL = 1  # Update interval in seconds

ROOT = Path("/home/qinsy/yolov5")  # adjust to match your project path

def signal_handler(sig, frame):
    """SIGINT handler: announce shutdown and exit with status 0."""
    print("Terminating the program...")
    sys.exit(0)


# Install the Ctrl-C handler so the script terminates gracefully.
signal.signal(signal.SIGINT, signal_handler)

def get_distance_from_rostopic(topic_name, callback):
    """Subscribe to a sensor_msgs/Range topic and relay readings to *callback*.

    Blocks until rospy shuts down.  Once per DISTANCE_UPDATE_INTERVAL seconds
    the latest reading (msg.range) is forwarded to ``callback(range_value)``;
    nothing is forwarded until the first message arrives.

    Fixes vs. the original:
    - ``rospy.init_node`` is now guarded: calling it twice (e.g. when the
      caller already initialised the node) raises ROSException.
    - The polling rate uses DISTANCE_UPDATE_INTERVAL, which was previously
      defined but unused (the rate was hard-coded to 1 Hz — identical for
      the default interval of 1 s).
    """
    if not rospy.core.is_initialized():
        rospy.init_node('distance_listener', anonymous=True)

    latest = [None]  # one-element list so the subscriber callback can write it

    def _on_range(msg):
        latest[0] = msg.range

    rospy.Subscriber(topic_name, Range, _on_range)

    # Poll-and-forward loop (not a ROS timer): forward the most recent
    # reading at the configured interval until shutdown.
    rate = rospy.Rate(1.0 / DISTANCE_UPDATE_INTERVAL)
    while not rospy.is_shutdown():
        if latest[0] is not None:
            callback(latest[0])
        rate.sleep()

def calculate_f_dx(distance):
    """Map a measured distance to the per-pixel scale factor f_dx.

    Linear fit: f_dx = 0.1168 * distance + 0.0699.
    """
    slope, intercept = 0.1168, 0.0699
    return slope * distance + intercept

def start_ros_listener(topic_name, callback):
    """Block forever, feeding range readings from *topic_name* into *callback*.

    Intended to run in a background thread (see run()).

    Fixes vs. the original:
    - ``rospy.init_node`` was called both here and inside
      get_distance_from_rostopic(); the second call raises ROSException,
      so the duplicate call is removed (the helper initialises the node).
    - The *callback* argument was silently ignored (the module-level
      ``handle_distance`` was hard-wired); it is now honoured.  Existing
      callers pass ``handle_distance`` explicitly, so behaviour is unchanged.
    """
    get_distance_from_rostopic(topic_name, callback)
    
def handle_distance(new_distance):
    """Convert a raw range reading to f_dx and store it in the shared cell.

    Called from the ROS listener thread with each forwarded reading
    (presumably metres — TODO confirm against the Range publisher).

    NOTE(review): the original relied on a module-level ``distance`` that is
    never defined at module scope — run() only creates a *local* list named
    ``distance`` — so the first callback raised NameError.  A module-level
    cell is created on demand here to avoid the crash; run() must read the
    same global for updates to become visible to the inference loop.
    """
    global distance
    if "distance" not in globals():
        distance = [DEFAULT_F_DX]
    distance[0] = calculate_f_dx(new_distance)
    print(f"Updated f_dx: {distance[0]}")

def process_image(im0, labels, f_dx, target_class="2"):
    """Estimate the real-world area covered by detections of one class.

    Parameters
    ----------
    im0 : image array; only its height (shape[0]) and width (shape[1]) are
        used to convert box coordinates to pixels.
    labels : iterable of rows ``[class_id, x_center, y_center, w, h, ...]``
        with centre/size treated as normalised (0-1) fractions of the image.
        NOTE(review): run() appends *pixel-space* xywh (no normalisation by
        image size) — verify which convention is actually intended.
    f_dx : per-pixel scale factor (see calculate_f_dx()).
    target_class : class-id string whose boxes are accumulated; defaults to
        "2", matching the value the original hard-coded.

    Returns
    -------
    S_e : float — total box pixel count * f_dx**2 * 1e-7.
    """
    height, width = im0.shape[0], im0.shape[1]
    total_pixels = 0

    for label in labels:
        if label[0] != target_class:
            continue
        x_c, y_c = float(label[1]), float(label[2])
        w_n, h_n = float(label[3]), float(label[4])
        left = int(round((x_c - 0.5 * w_n) * width))
        top = int(round((y_c - 0.5 * h_n) * height))
        right = int(round((x_c + 0.5 * w_n) * width))
        low = int(round((y_c + 0.5 * h_n) * height))
        # Box area in pixels (previously delegated to num_uv(), which only
        # multiplied width by height; inlined here and debug prints dropped).
        total_pixels += (right - left) * (low - top)

    return total_pixels * f_dx * f_dx * 10 ** (-7)

def num_uv(img, left, top, right, low):
    """Return the pixel count of the axis-aligned box (left, top)-(right, low).

    *img* is accepted for interface compatibility but not used.
    """
    # Box dimensions in pixels.
    box_w, box_h = right - left, low - top
    print(box_w, box_h)

    pixel_count = box_w * box_h
    print(pixel_count)

    return pixel_count

@smart_inference_mode()
def run(weights=ROOT / "best_0902.pt", source="screen", data=ROOT / "data/qinsy4.yaml", imgsz=(640, 640), 
        conf_thres=0.25, iou_thres=0.45, max_det=1000, device="", view_img=True, save_txt=False, 
        save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, 
        visualize=False, update=False, project=ROOT / "runs/detect", name="exp", exist_ok=False, 
        line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1):
    """Run YOLOv5 detection on *source* and estimate object area per frame.

    Loads a DetectMultiBackend model, iterates frames from a file / URL /
    webcam / screen capture, runs inference + NMS, draws annotations, and
    for frames with detections calls process_image() with the current f_dx
    (taken from the ``distance`` list fed by a background ROS thread) to
    print an area estimate S_e.  Optionally saves labels, crops and video.

    The signature mirrors YOLOv5's standard detect.run(); see that script
    for per-parameter semantics.  Blocks until the dataset is exhausted or
    the user presses 'q' in the preview window (raises StopIteration).
    """
    source = str(source)
    save_img = not nosave and not source.endswith(".txt")  # save annotated output?
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
    webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
    screenshot = source.lower().startswith("screen")
    if is_url and is_file:
        source = check_file(source)  # download the remote file locally

    # Directories: runs/detect/expN (plus labels/ when saving txt)
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
    (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size(imgsz, s=stride)  # round size to a stride multiple

    # Dataloader (batch size > 1 only for multi-stream webcam sources)
    bs = 1
    if webcam:
        view_img = check_imshow(warn=True)
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
        bs = len(dataset)
    elif screenshot:
        dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Initialize ROS: ``distance`` holds the current f_dx in a one-element
    # list; a background thread feeds it from the /distance topic.
    # NOTE(review): handle_distance() assigns a *module-level* ``distance``
    # global, while this is a *local* list — updates from the ROS thread
    # never reach this loop as written; confirm and unify the two.
    distance = [DEFAULT_F_DX] 
    ros_thread = threading.Thread(target=start_ros_listener, args=("/distance", handle_distance))
    ros_thread.start()

    # Run inference
    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))
    seen, windows = 0, []
    start_time = time.time()  # Start time for total processing

    for path, im, im0s, vid_cap, s in dataset:
        f_dx = distance[0]  # snapshot the current scale factor for this frame
        start_process_time = time.time()  # Start time for processing
        im = torch.from_numpy(im).to(model.device)
        im = im.half() if model.fp16 else im.float()  # uint8 -> fp16/fp32
        im /= 255  # 0-255 -> 0.0-1.0
        if len(im.shape) == 3:
            im = im[None]  # add batch dimension
        if model.xml and im.shape[0] > 1:
            ims = torch.chunk(im, im.shape[0], 0)  # OpenVINO: run images one by one

        # Inference
        start_infer_time = time.time()  # Start time for inference
        if model.xml and im.shape[0] > 1:
            pred = None
            for image in ims:
                if pred is None:
                    pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)
                else:
                    pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)
            pred = [pred, None]
        else:
            pred = model(im, augment=augment, visualize=visualize)

        # Non-max suppression
        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)

        # Per-image processing (one detection tensor per batch element)
        for i, det in enumerate(pred):
            seen += 1
            if webcam:
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f"{i}: "
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)

            p = Path(p)
            # NOTE(review): save_path is None whenever save_img is False;
            # the video-writer branch below is then skipped entirely.
            save_path = str(Path(save_dir) / f"{Path(source).stem}.mp4") if save_img else None
            txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}")
            s += "%gx%g " % im.shape[2:]
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # whwh normalisation gain (currently unused)
            imc = im0.copy() if save_crop else im0  # untouched copy for crop saving
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            labels = []  # rows of [class, x, y, w, h, conf] handed to process_image()

            if len(det):
                # Rescale boxes from the inference size back to im0's size
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)  # integer class id
                    label = names[c] if hide_conf else f"{names[c]}"
                    confidence = float(conf)
                    confidence_str = f"{confidence:.2f}"

                    LOGGER.info(f"Detected {label} with confidence {confidence_str}")
                    print(f"Bounding box: {xyxy}, Confidence: {confidence_str}")

                    # NOTE(review): this xywh is in pixel units (not divided
                    # by gn), yet process_image() re-scales by image size as
                    # if normalised — verify which convention is intended.
                    labels.append([str(c), *xyxy2xywh(torch.tensor(xyxy).view(1, 4)).view(-1).tolist(), confidence])

                    if save_txt:
                        with open(txt_path + ".txt", "a") as f:
                            f.write(("%g " * 5 + "%g\n") % (*xyxy2xywh(torch.tensor(xyxy).view(1, 4)).view(-1).tolist(), confidence))

                    if save_img or save_crop:
                        save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / Path(p).name, BGR=True)

                    annotator.box_label(xyxy, label, color=colors(c, True))

                # Process the image and calculate the area estimate S_e
                S_e = process_image(im0, labels, f_dx)
                print(f"Calculated S_e: {S_e}")

            im0 = annotator.result()

            if save_img:
                if dataset.mode == "image":
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                        save_path = str(Path(save_path).with_suffix(".mp4"))  # force *.mp4 suffix on results videos
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
                    vid_writer[i].write(im0)

        LOGGER.info(f"Process time: {time.time() - start_process_time:.3f}s")
        if view_img:
            # NOTE(review): p and im0 carry over from the inner loop; if the
            # dataset yields an empty batch list this raises NameError.
            cv2.imshow(str(p), im0)
            if cv2.waitKey(1) == ord("q"):
                # A StopIteration raised in the loop *body* is not absorbed
                # by the for statement — it propagates to the caller and is
                # the intended quit mechanism here.
                raise StopIteration

    LOGGER.info(f"Total time: {time.time() - start_time:.3f}s")
    if save_img:
        # Flush and close any open video writers.
        for i, vid in enumerate(vid_writer):
            if isinstance(vid, cv2.VideoWriter):
                vid.release()

def parse_args():
    """Parse command-line arguments."""
    # (flag, add_argument keyword arguments) — registered in this exact order.
    option_table = [
        ("--weights", dict(type=str, default="./best_0902.pt", help="Path to model weights")),
        ("--source", dict(type=str, default="screen", help="Source to use for detection (default: screen)")),
        ("--data", dict(type=str, default="./data/qinsy4.yaml", help="Data configuration file")),
        ("--imgsz", dict(type=int, nargs="+", default=[640, 640], help="Image size (width, height)")),
        ("--conf-thres", dict(type=float, default=0.25, help="Confidence threshold")),
        ("--iou-thres", dict(type=float, default=0.45, help="IOU threshold")),
        ("--max-det", dict(type=int, default=1000, help="Maximum number of detections")),
        ("--device", dict(type=str, default="", help="Device to run inference on (e.g. 'cpu', '0')")),
        ("--view-img", dict(action="store_true", help="Display results")),
        ("--save-txt", dict(action="store_true", help="Save labels to .txt files")),
        ("--save-conf", dict(action="store_true", help="Save confidences in labels")),
        ("--save-crop", dict(action="store_true", help="Save cropped images")),
        ("--nosave", dict(action="store_true", help="Do not save images or video")),
        ("--classes", dict(type=int, nargs="+", default=None, help="Filter by class")),
        ("--agnostic-nms", dict(action="store_true", help="Apply class-agnostic NMS")),
        ("--augment", dict(action="store_true", help="Apply augmentations")),
        ("--visualize", dict(action="store_true", help="Visualize predictions")),
        ("--update", dict(action="store_true", help="Update model weights")),
        ("--project", dict(type=str, default="./runs/detect", help="Save results to this directory")),
        ("--name", dict(type=str, default="exp", help="Name of saved directory")),
        ("--exist-ok", dict(action="store_true", help="Allow existing project/name")),
        ("--line-thickness", dict(type=int, default=3, help="Line thickness for bounding boxes")),
        ("--hide-labels", dict(action="store_true", help="Hide labels in the output")),
        ("--hide-conf", dict(action="store_true", help="Hide confidences in the output")),
        ("--half", dict(action="store_true", help="Use half precision (fp16 inference)")),
        ("--dnn", dict(action="store_true", help="Use OpenCV DNN for inference")),
        ("--vid-stride", dict(type=int, default=1, help="Video frame stride")),
    ]

    parser = argparse.ArgumentParser()
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()

def main():
    """Main function to execute the YOLOv5 detection."""
    args = parse_args()
    # Force the live preview regardless of the --view-img CLI flag.
    args.view_img = True
    run(**vars(args))

if __name__ == "__main__":
    # 确保 ROOT 目录在 sys.path 中
    FILE = Path(__file__).resolve()
    ROOT = FILE.parents[0]  # YOLOv5 root directory
    if str(ROOT) not in sys.path:
        sys.path.append(str(ROOT))  # add ROOT to PATH
    main()

