import sys
import numpy as np
import os
import onnxruntime as ort
from PIL import Image, ImageDraw, ImageFont

# Compute the IoU (intersection over union) of two boxes
def calculate_iou(box1, box2):
    """Return the IoU of two boxes given in center format ``(cx, cy, w, h)``.

    The model output and ``draw_boxes_on_image`` treat ``(x, y)`` as the box
    *center* (corners are recovered via ``x - w/2`` etc.), so the same
    convention is applied here.  Returns 0.0 for disjoint boxes and for a
    degenerate union (both boxes have zero area).
    """
    cx1, cy1, w1, h1 = box1[:4]
    cx2, cy2, w2, h2 = box2[:4]

    # Center format -> corner format.  The previous code treated (x, y) as
    # the top-left corner, which shifts boxes of different sizes by
    # different amounts and yields wrong IoU values.
    x1_min, y1_min = cx1 - w1 / 2, cy1 - h1 / 2
    x1_max, y1_max = cx1 + w1 / 2, cy1 + h1 / 2
    x2_min, y2_min = cx2 - w2 / 2, cy2 - h2 / 2
    x2_max, y2_max = cx2 + w2 / 2, cy2 + h2 / 2

    inter_w = min(x1_max, x2_max) - max(x1_min, x2_min)
    inter_h = min(y1_max, y2_max) - max(y1_min, y2_min)
    inter_area = inter_w * inter_h if inter_w > 0 and inter_h > 0 else 0

    union = w1 * h1 + w2 * h2 - inter_area
    # Guard against ZeroDivisionError for zero-area boxes.
    return inter_area / union if union > 0 else 0.0

# Merge overlapping same-class boxes, keeping the highest-probability one
def merge_similar_boxes_by_probability(full_data, threshold=0.9):
    """Greedy NMS-style merge: cluster same-class boxes whose IoU exceeds
    *threshold* and keep only the highest-probability box of each cluster.

    Each entry of *full_data* ends with ``(..., probability, class_index)``;
    the leading values are the box passed to ``calculate_iou``.
    Returns a list of the surviving entries.
    """
    n = len(full_data)
    if n == 0:
        return []

    consumed = [False] * n
    kept = []

    for anchor in range(n):
        if consumed[anchor]:
            continue

        best = full_data[anchor]
        best_prob = best[-2]
        anchor_cls = int(best[-1])

        for other in range(anchor + 1, n):
            if consumed[other]:
                continue
            candidate = full_data[other]
            # Only boxes of the same class may be merged together.
            if int(candidate[-1]) != anchor_cls:
                continue
            if calculate_iou(best, candidate) > threshold:
                # Absorb the candidate; promote it if it is more confident.
                if candidate[-2] > best_prob:
                    best, best_prob = candidate, candidate[-2]
                consumed[other] = True

        kept.append(best)

    return kept

class LoadDetectOnnx:
    """YOLO-style detector backed by an ONNX Runtime session.

    The model is loaded once in ``__init__``; ``detect_object`` then runs
    the full pipeline: preprocess -> inference -> confidence filter ->
    box merging -> annotated image plus per-class counts.
    """

    def __init__(self, model_path, confidence=0.8):
        # Minimum class score for a detection to be kept.
        self.confidence = confidence
        self.classes = ['普通士兵', '工兵', '火箭兵', '指挥官']
        self.font_path = './simhei.ttf'  # Replace with the correct font path
        self.session = self._init_model(model_path)

    def _init_model(self, model_path):
        """Create the ONNX Runtime session (default providers/options)."""
        return ort.InferenceSession(model_path)

    def load_and_preprocess_image(self, image_path, target_size=(640, 640)):
        """Return ``(1xCxHxW float32 array scaled to [0, 1], original (w, h))``."""
        img = Image.open(image_path).convert('RGB')
        size_before = img.size
        # NOTE(review): plain resize — aspect ratio is not preserved.
        arr = np.asarray(img.resize(target_size), dtype=np.float32) / 255.0
        arr = arr.transpose(2, 0, 1)[np.newaxis, ...]  # HWC -> NCHW
        return arr, size_before

    def run_inference(self, image_array):
        """Feed the preprocessed tensor to the session's first input."""
        first_input = self.session.get_inputs()[0].name
        return self.session.run(None, {first_input: image_array})

    def draw_boxes_on_image(self, image, boxes, original_size, font_size=20, resized_size=(640, 640)):
        """Draw labeled boxes (center-format, in ``resized_size`` space) on
        *image*, rescaled to *original_size*; return ``(image, counts)``."""
        painter = ImageDraw.Draw(image)
        sx = original_size[0] / resized_size[0]
        sy = original_size[1] / resized_size[1]
        label_font = ImageFont.truetype(self.font_path, font_size)

        counts = dict.fromkeys(self.classes, 0)

        for entry in boxes:
            cx, cy, bw, bh, prob, cls = entry[:6]
            # Center (cx, cy) + size -> corner coordinates in original scale.
            left = int((cx - bw / 2) * sx)
            top = int((cy - bh / 2) * sy)
            right = int((cx + bw / 2) * sx)
            bottom = int((cy + bh / 2) * sy)

            painter.rectangle([left, top, right, bottom], outline='red', width=2)

            label = self.classes[int(cls)]
            painter.text((left, top), f"{label} {prob:.2f}", fill='red', font=label_font)
            counts[label] += 1

        return image, counts

    def detect_object(self, img_src):
        """Run detection on one image file.

        Returns ``(annotated PIL image, {class_name: count})`` on success,
        or ``(None, {})`` when the model output shape is not (1, 8, 8400).
        """
        tensor, size_before = self.load_and_preprocess_image(img_src)
        raw = self.run_inference(tensor)[0]

        # Expected layout: 8 rows = 4 bbox values + 4 class scores,
        # over 8400 anchor slots.
        if raw.shape != (1, 8, 8400):
            return None, {}

        preds = np.squeeze(raw)
        candidates = []
        for col in range(8400):
            scores = preds[-4:, col]
            best_score = np.max(scores)
            if best_score > self.confidence:
                # Box values followed by (probability, class_index).
                row = np.concatenate([preds[:-4, col], [best_score, np.argmax(scores)]])
                candidates.append(row)

        survivors = merge_similar_boxes_by_probability(candidates, threshold=0.5)
        canvas = Image.open(img_src).convert('RGB')
        return self.draw_boxes_on_image(canvas, survivors, size_before)

def main(area):
    """Detect soldiers in every photo whose filename contains *area*,
    save annotated copies under ``image/result``, and print aggregated
    per-class counts.
    """
    base_dir = os.path.dirname(__file__)
    model_path = os.path.join(base_dir, "best.onnx")
    infer = LoadDetectOnnx(model_path)

    photo_dir = os.path.join(base_dir, 'image/photo')
    result_dir = os.path.join(base_dir, 'image/result')
    # Previously this crashed with FileNotFoundError on save() when the
    # result directory was missing; create it up front.
    os.makedirs(result_dir, exist_ok=True)

    total_class_counts = {class_name: 0 for class_name in infer.classes}

    for img in os.listdir(photo_dir):
        if area not in img:
            continue
        output_image, class_counts = infer.detect_object(os.path.join(photo_dir, img))
        if output_image is None:
            continue  # unexpected model output shape; nothing to save

        output_image.save(os.path.join(result_dir, img))

        # Accumulate this image's detections into the grand total.
        for class_name, count in class_counts.items():
            total_class_counts[class_name] += count

    print(f"区域{area},普通士兵{total_class_counts['普通士兵']}人,工兵{total_class_counts['工兵']}人,火箭兵{total_class_counts['火箭兵']}人,指挥官{total_class_counts['指挥官']}人")

if __name__ == "__main__":
    if len(sys.argv) > 1:
        main(sys.argv[1])
