import onnxruntime as rt
import numpy as np
import cv2

CLASS_NAMES = {0: "Face"}

def box_to_corners(box):
    """Convert a center-format box [cx, cy, w, h] to corner format [x1, y1, x2, y2]."""
    cx, cy, w, h = box[0], box[1], box[2], box[3]
    half_w = w / 2
    half_h = h / 2
    return [cx - half_w, cy - half_h, cx + half_w, cy + half_h]

def get_intersection_area(box1, box2):
    """Return the overlap area of two center-format boxes [cx, cy, w, h] (0 if disjoint)."""
    # Left/top edge of the intersection: the larger of the two boxes' mins.
    left = max(box1[0] - box1[2] / 2, box2[0] - box2[2] / 2)
    top = max(box1[1] - box1[3] / 2, box2[1] - box2[3] / 2)
    # Right/bottom edge: the smaller of the two boxes' maxes.
    right = min(box1[0] + box1[2] / 2, box2[0] + box2[2] / 2)
    bottom = min(box1[1] + box1[3] / 2, box2[1] + box2[3] / 2)
    if left >= right or top >= bottom:
        return 0
    return (right - left) * (bottom - top)

def get_iou(box1, box2):
    """Intersection-over-union of two center-format boxes [cx, cy, w, h]."""
    overlap = get_intersection_area(box1, box2)
    area1 = box1[2] * box1[3]
    area2 = box2[2] * box2[3]
    union = area1 + area2 - overlap
    if union <= 0:
        return 0
    return overlap / union

def nms(predictions, conf_threshold=0.3, iou_threshold=0.45):
    """Greedy non-maximum suppression on detector output.

    Args:
        predictions: (N, 5 + num_classes) array; columns are
            [cx, cy, w, h, confidence, class_score_0, ...].
        conf_threshold: detections with confidence <= this are dropped.
        iou_threshold: boxes with IoU > this against a kept box are suppressed.

    Returns:
        (K, 6) ndarray of kept detections [cx, cy, w, h, confidence, class_id].
        Always an ndarray — an empty (0, 6) array when nothing survives
        (the previous implementation returned a plain list here).
    """
    conf_mask = predictions[..., 4] > conf_threshold
    preds = predictions[conf_mask]
    if len(preds) == 0:
        return np.zeros((0, 6))

    class_ids = np.argmax(preds[..., 5:], axis=1)
    # Process highest-confidence boxes first.
    order = np.argsort(-preds[..., 4])
    preds = preds[order]
    class_ids = class_ids[order]

    # Precompute corner coordinates and areas once (vectorized),
    # instead of re-deriving them per box pair inside the loop.
    x1 = preds[:, 0] - preds[:, 2] / 2
    y1 = preds[:, 1] - preds[:, 3] / 2
    x2 = preds[:, 0] + preds[:, 2] / 2
    y2 = preds[:, 1] + preds[:, 3] / 2
    areas = preds[:, 2] * preds[:, 3]

    keep_rows = []
    remaining = np.arange(len(preds))
    while remaining.size > 0:
        best = remaining[0]
        keep_rows.append(np.concatenate([preds[best, :5], [class_ids[best]]]))
        rest = remaining[1:]
        if rest.size == 0:
            break
        # Vectorized IoU of the best box against all remaining boxes.
        inter_w = np.clip(np.minimum(x2[best], x2[rest]) - np.maximum(x1[best], x1[rest]), 0, None)
        inter_h = np.clip(np.minimum(y2[best], y2[rest]) - np.maximum(y1[best], y1[rest]), 0, None)
        inter = inter_w * inter_h
        union = areas[best] + areas[rest] - inter
        # Degenerate (zero-area) unions get IoU 0, matching the scalar helper.
        ious = np.where(union > 0, inter / np.maximum(union, 1e-12), 0.0)
        # Keep only boxes that do NOT overlap the best box too much.
        remaining = rest[ious <= iou_threshold]
    return np.array(keep_rows)

def draw_detections(image, scale_x, scale_y, predictions):
    """Render detection boxes and labels onto a copy of `image`.

    Detections are in model-input coordinates; `scale_x`/`scale_y` map
    them back to the original image size. Each detection row is
    [cx, cy, w, h, confidence, class_id]. Returns the annotated copy.
    """
    canvas = image.copy()
    for det in predictions:
        cx, cy, w, h = det[0:4]
        # Center-format -> corner coords, rescaled to the source image.
        left = int((cx - w/2) * scale_x)
        top = int((cy - h/2) * scale_y)
        right = int((cx + w/2) * scale_x)
        bottom = int((cy + h/2) * scale_y)
        cv2.rectangle(canvas, (left, top), (right, bottom), (0, 255, 0), 2)
        cls_id = int(det[5])
        conf = round(float(det[4]), 2)
        cls_name = CLASS_NAMES.get(cls_id, f"Class {cls_id}")
        label = f"{cls_name}: {conf}"
        # Filled background sized to the label text, then the text on top.
        (text_w, text_h), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
        cv2.rectangle(canvas, (left, top - text_h - baseline), (left + text_w, top), (0, 255, 0), -1)
        cv2.putText(canvas, label, (left, top - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    return canvas

if __name__ == '__main__':
    input_height, input_width = 640, 640

    image = cv2.imread('Face_17.png')
    if image is None:
        # cv2.imread signals failure by returning None rather than raising.
        raise SystemExit("Could not read input image 'Face_17.png'")

    # Factors to map detections (model-input space) back to the original image.
    scale_x = image.shape[1] / input_width
    scale_y = image.shape[0] / input_height

    # Normalize to [0, 1], resize to model input, HWC -> CHW, add batch dim.
    # NOTE(review): cv2 loads BGR; many detectors expect RGB — confirm the
    # model was exported for BGR input before changing anything here.
    preprocessed = image / 255.0
    preprocessed = cv2.resize(preprocessed, (input_width, input_height))
    preprocessed = np.transpose(preprocessed, (2, 0, 1))
    input_data = np.expand_dims(preprocessed, axis=0).astype(np.float32)

    try:
        session = rt.InferenceSession('Face.onnx')
    except Exception as err:
        # Narrow except (the old bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit) and report why startup failed.
        raise SystemExit(f"Failed to load ONNX model 'Face.onnx': {err}")

    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
    predictions = session.run([output_name], {input_name: input_data})[0]

    # Squeeze the batch dim, then transpose (channels, anchors) ->
    # (anchors, channels) so each row is one candidate detection.
    predictions = np.squeeze(predictions)
    if len(predictions.shape) == 1:
        # NOTE(review): this 1-D path yields shape (channels, 1) after the
        # transpose below — verify against the model's actual output layout.
        predictions = np.expand_dims(predictions, axis=0)
    predictions = np.transpose(predictions, (1, 0))

    # Insert the per-row max class score at column 4 so rows become
    # [cx, cy, w, h, confidence, class_score_0, ...] as nms() expects.
    class_confidences = predictions[..., 4:]
    max_confidences = np.max(class_confidences, axis=-1)
    predictions = np.insert(predictions, 4, max_confidences, axis=-1)

    results = nms(predictions, 0.3, 0.45)
    result_image = draw_detections(image, scale_x, scale_y, results)
    cv2.imwrite('output.jpg', result_image)


