import cv2
import numpy as np
import os
import sys

from config import Config

# Add the utils directory to the system path
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'yolov3-coco'))

from yolo_utils import infer_image, get_outputs_names, draw_labels_and_boxes
from light_recog import getColorList, get_color

# Constants for YOLO
# NOTE(review): these mirror values from the central Config object but are
# not referenced anywhere else in this file — presumably they exist so other
# modules (or the yolo_utils helpers) can import them from here; confirm
# before removing or renaming.
CONF_THRESHOLD = Config.CONF_THRESHOLD
NMS_THRESHOLD = Config.NMS_THRESHOLD
IMG_WIDTH = Config.IMG_WIDTH
IMG_HEIGHT = Config.IMG_HEIGHT

def load_yolo_model(config_path, weights_path):
    """Build a Darknet/YOLO network from disk, configured for CPU inference.

    Args:
        config_path: Path to the Darknet ``.cfg`` model description.
        weights_path: Path to the matching pre-trained ``.weights`` file.

    Returns:
        A ``cv2.dnn`` network object set to use the OpenCV backend on CPU.
    """
    network = cv2.dnn.readNetFromDarknet(config_path, weights_path)
    # Force the plain OpenCV CPU path (no CUDA/OpenCL dependency).
    network.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    network.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
    return network

def _classify_roi_color(traffic_light_roi):
    """Classify the dominant light color inside a traffic-light crop.

    Resizes the crop to a fixed 50x100 patch (so the color matcher sees a
    consistent size), derives an adaptive low-saturation threshold from the
    patch's mean saturation, and runs the HSV color-list matcher.

    Args:
        traffic_light_roi: Non-empty BGR image crop (numpy array).

    Returns:
        The color name string produced by ``get_color``.
    """
    resized_roi = cv2.resize(traffic_light_roi, (50, 100), interpolation=cv2.INTER_AREA)

    # Dynamic threshold adjustment (simple adaptive method for now):
    # scale the low saturation bound by the patch's average saturation,
    # capped at the uint8 maximum.
    hsv_roi = cv2.cvtColor(resized_roi, cv2.COLOR_BGR2HSV)
    avg_saturation = float(np.mean(hsv_roi[:, :, 1]))
    sat_low = min(int(avg_saturation * Config.SAT_LOW_THRESHOLD), 255)
    val_low = Config.VAL_LOW_THRESHOLD  # fixed for now; could be made adaptive later

    color_list = getColorList(sat_low, val_low)
    return get_color(resized_roi, color_list)


def detect_and_classify_traffic_lights(image, net, labels, target_classes_filter):
    """Run YOLO on ``image`` and color-classify detections in the target classes.

    Args:
        image: BGR input frame (numpy array, shape HxWx3).
        net: Loaded cv2.dnn network (see ``load_yolo_model``).
        labels: List of class names indexed by class id.
        target_classes_filter: Iterable of label names to keep and classify.

    Returns:
        Tuple ``(output_frame, detected_objects)`` where ``output_frame`` is
        the annotated image and ``detected_objects`` is a list of dicts with
        keys ``class_id``, ``confidence``, ``box``, ``label`` and, when a
        valid ROI was classified, ``color``.
    """
    H, W, _ = image.shape

    # Perform inference (yolo_utils handles blob creation and NMS).
    _, boxes, confidences, classids, idxs = infer_image(
        net, get_outputs_names(net), H, W, image, None, labels, infer=True)

    detected_objects = []
    if len(idxs) > 0:
        for i in idxs.flatten():
            class_id, confidence, box = classids[i], confidences[i], boxes[i]
            x, y, w, h = box

            obj_info = {
                'class_id': class_id,
                'confidence': confidence,
                'box': box,
                'label': labels[class_id],
            }

            # NOTE(review): only detections matching the filter are appended;
            # obj_info for other classes is built and then discarded — confirm
            # this is intended rather than an indentation slip.
            if labels[class_id] in target_classes_filter:
                # Clamp the bounding box to the image boundaries.
                x = max(0, x)
                y = max(0, y)
                w = min(w, W - x)
                h = min(h, H - y)

                if w > 0 and h > 0:
                    traffic_light_roi = image[y:y + h, x:x + w]
                    if traffic_light_roi.size == 0:
                        continue
                    obj_info['color'] = _classify_roi_color(traffic_light_roi)
                detected_objects.append(obj_info)

    # Fixed seed so each class keeps the same box color across calls — a fresh
    # np.random draw per invocation made the colors flicker every frame in
    # camera mode.
    colors = np.random.RandomState(42).randint(0, 255, size=(len(labels), 3), dtype='uint8')
    output_frame = draw_labels_and_boxes(image, detected_objects, colors, labels)

    return output_frame, detected_objects


def _load_labels(labels_path):
    """Read class names, one per line; list index must equal the class id."""
    with open(labels_path, 'r') as f:
        return [line.strip() for line in f]


def _read_image(image_path):
    """Decode an image from raw bytes; returns None if decoding fails.

    Reading via a byte buffer + ``cv2.imdecode`` instead of ``cv2.imread``
    keeps non-ASCII paths working (notably on Windows).
    """
    with open(image_path, 'rb') as f:
        image_np = np.frombuffer(f.read(), np.uint8)
    return cv2.imdecode(image_np, cv2.IMREAD_COLOR)


def _run_image_mode(net, all_labels, target_classes):
    """Process the single configured test image, display it, print results."""
    TEST_IMAGE_PATH = Config.TEST_IMAGE_PATH

    if not os.path.exists(TEST_IMAGE_PATH):
        print(f"Error: Test image not found at {TEST_IMAGE_PATH}")
        sys.exit(1)

    image = _read_image(TEST_IMAGE_PATH)
    if image is None:
        print(f"Error: Could not decode test image from {TEST_IMAGE_PATH}")
        sys.exit(1)

    print(f"Processing image: {TEST_IMAGE_PATH}")
    output_image, detected_objects = detect_and_classify_traffic_lights(
        image, net, all_labels, target_classes)

    # Display results until any key is pressed.
    cv2.imshow("Traffic Light Recognition", output_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    print("Detected Objects:")
    for obj in detected_objects:
        print(obj)


def _run_camera_mode(net, all_labels, target_classes):
    """Run detection on a live camera feed until 'q' is pressed."""
    print("Camera input selected. Starting live feed...")
    cap = cv2.VideoCapture(Config.CAMERA_INDEX)  # Use CAMERA_INDEX from config
    cap.set(cv2.CAP_PROP_FPS, Config.FRAME_RATE)  # Set frame rate

    if not cap.isOpened():
        print("Error: Could not open video stream.")
        sys.exit(1)

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                print("Error: Failed to grab frame.")
                break

            output_frame, detected_objects = detect_and_classify_traffic_lights(
                frame, net, all_labels, target_classes)

            cv2.imshow("Traffic Light Recognition (Live)", output_frame)

            print("Detected Objects:")
            for obj in detected_objects:
                print(obj)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera even if processing raises mid-loop.
        cap.release()
        cv2.destroyAllWindows()


def main():
    """Entry point: load model and labels, then dispatch on Config.RUN_MODE."""
    all_labels = _load_labels(Config.YOLO_LABELS_FILE)

    # Either detect every known class or restrict to the configured subset.
    if Config.DETECT_ALL_CLASSES:
        target_classes = all_labels
    else:
        target_classes = Config.TARGET_CLASSES

    net = load_yolo_model(Config.YOLO_CFG, Config.YOLO_WEIGHTS)

    if Config.RUN_MODE == "image":
        _run_image_mode(net, all_labels, target_classes)
    elif Config.RUN_MODE == "camera":
        _run_camera_mode(net, all_labels, target_classes)

if __name__ == "__main__":
    main()