#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import json
import cv2
import numpy as np
import random
from tqdm import tqdm
from pathlib import Path
import argparse


class StrawberryPadding:
    """
    StrawberryPadding
    ================

    Purpose:
        Extract strawberry instances labelled "Disease" from the original
        dataset and paste them onto background images, producing an
        augmented dataset with updated YOLO-segmentation annotations.

    Inputs:
        - project_path: original project path (contains images/, labels/, notes.json)
        - output_path: output path
        - background_path: directory of background images

    Outputs:
        - padded_images/: composited (padded) images
        - padded_labels/: annotation files for the composited images
        - extracted_instances/: the extracted instance crops (RGBA PNGs)

    Usage:
        padding = StrawberryPadding(project_path, output_path, background_path)
        padding()
    """

    def __init__(self, project_path, output_path, background_path, seed=42):
        """Set up paths, seed the RNGs, create output dirs, and index input files.

        Args:
            project_path: original project root (must contain images/, labels/, notes.json).
            output_path: where padded_images/, padded_labels/, extracted_instances/ are created.
            background_path: directory holding the background images.
            seed: seed applied to both `random` and `numpy.random` for reproducibility.
        """
        self.project_path = Path(project_path)
        self.output_path = Path(output_path)
        self.background_path = Path(background_path)
        self.seed = seed

        # Fix both RNGs so placements and transforms are reproducible.
        random.seed(seed)
        np.random.seed(seed)

        self.images_dir = self.project_path / "images"
        self.labels_dir = self.project_path / "labels"
        self.padded_images_dir = self.output_path / "padded_images"
        self.padded_labels_dir = self.output_path / "padded_labels"
        self.extracted_instances_dir = self.output_path / "extracted_instances"
        self.notes_path = self.project_path / "notes.json"

        # Create the output directories (idempotent).
        os.makedirs(self.padded_images_dir, exist_ok=True)
        os.makedirs(self.padded_labels_dir, exist_ok=True)
        os.makedirs(self.extracted_instances_dir, exist_ok=True)

        # Load the class-id -> class-name mapping from notes.json.
        self.class_map = self._load_class_map()

        # Index all source images (png/jpg/jpeg, case-insensitive), sorted for determinism.
        self.all_image_files = sorted([
            f for f in os.listdir(self.images_dir)
            if f.lower().endswith(('.png', '.jpg', '.jpeg'))
        ])

        # Index all background images, sorted for determinism.
        self.background_files = sorted([
            f for f in os.listdir(self.background_path)
            if f.lower().endswith(('.png', '.jpg', '.jpeg'))
        ])

        if len(self.background_files) == 0:
            print("警告: 背景图片路径中没有找到任何图片文件")

        # Instance records accumulated by extract_instances().
        self.extracted_instances = []

        print(f"找到 {len(self.all_image_files)} 张原始图片")
        print(f"找到 {len(self.background_files)} 张背景图片")

    def _load_class_map(self):
        """Read the category id -> name mapping from notes.json.

        The file is expected to be COCO-like:
        {"categories": [{"id": ..., "name": ...}, ...]}.

        Raises:
            FileNotFoundError: if notes.json is missing.
        """
        if not self.notes_path.exists():
            raise FileNotFoundError(f"未找到 {self.notes_path}")
        with open(self.notes_path, "r", encoding="utf-8") as f:
            notes = json.load(f)
        return {item["id"]: item["name"] for item in notes["categories"]}

    def extract_instances(self):
        """Scan every source image and crop out polygons whose class is "Disease".

        Each kept instance is written to extracted_instances/ as an RGBA PNG
        (alpha channel = rasterized polygon mask) and a record containing its
        geometry is appended to self.extracted_instances.
        """
        print("开始提取草莓实例...")

        instance_id = 0
        for image_file in tqdm(self.all_image_files, desc="提取实例"):
            # Load the image.
            img_path = self.images_dir / image_file
            img = cv2.imread(str(img_path))

            if img is None:
                print(f"错误: 无法读取图像 {image_file}")
                continue

            img_h, img_w = img.shape[:2]

            # Locate the matching YOLO-segmentation label file.
            label_file = img_path.stem + ".txt"
            label_path = self.labels_dir / label_file

            if not label_path.exists():
                continue

            with open(label_path, 'r', encoding='utf-8') as f:
                lines = f.readlines()

            # Parse each annotation line: "<class_id> x1 y1 x2 y2 ..." (normalized coords).
            for i, line in enumerate(lines):
                parts = line.strip().split()
                if len(parts) < 3:  # need at least a class id and one point
                    continue

                class_id = int(parts[0])
                class_name = self.class_map.get(class_id, "unknown")

                # Keep only "Disease" instances (case-insensitive match).
                if class_name.lower() != "disease":
                    continue

                # Keep the original polygon points (normalized coordinates).
                normalized_points = [float(x) for x in parts[1:]]

                # Convert normalized coordinates to pixel coordinates.
                pixel_points = []
                for j in range(0, len(normalized_points), 2):
                    if j + 1 < len(normalized_points):
                        x = int(normalized_points[j] * img_w)
                        y = int(normalized_points[j + 1] * img_h)
                        pixel_points.append((x, y))

                if len(pixel_points) < 3:  # a polygon needs at least 3 points
                    continue

                # Rasterize the polygon into a binary instance mask.
                mask = np.zeros((img_h, img_w), dtype=np.uint8)
                pts = np.array(pixel_points, np.int32)
                cv2.fillPoly(mask, [pts], 255)

                # Bounding box of the instance.
                x, y, w, h = cv2.boundingRect(pts)
                if w == 0 or h == 0:
                    continue

                # Grow the box slightly so the whole instance is included.
                padding = 5
                x = max(0, x - padding)
                y = max(0, y - padding)
                w = min(img_w - x, w + 2 * padding)
                h = min(img_h - y, h + 2 * padding)

                instance_region = img[y:y + h, x:x + w]
                instance_mask = mask[y:y + h, x:x + w]

                # Build an RGBA crop whose alpha channel is the polygon mask.
                instance_rgba = cv2.cvtColor(instance_region, cv2.COLOR_BGR2BGRA)
                instance_rgba[:, :, 3] = instance_mask

                # Save the instance crop to disk.
                instance_filename = f"instance_{instance_id:04d}_{class_name}.png"
                instance_path = self.extracted_instances_dir / instance_filename
                cv2.imwrite(str(instance_path), instance_rgba)

                # Re-express the polygon relative to the crop (normalized to crop size).
                relative_points = []
                for px, py in pixel_points:
                    rx = (px - x) / w  # coordinates normalized to the crop region
                    ry = (py - y) / h
                    relative_points.append((rx, ry))

                # Record everything needed to paste this instance later.
                self.extracted_instances.append({
                    'id': instance_id,
                    'class_id': class_id,
                    'class_name': class_name,
                    'filename': instance_filename,
                    'path': instance_path,
                    'original_image': image_file,
                    'bbox': (x, y, w, h),
                    'mask': instance_mask,
                    'size': (w, h),
                    'normalized_points': normalized_points,  # original normalized coordinates
                    'relative_points': relative_points,  # crop-relative normalized coordinates
                    'pixel_points': pixel_points  # original pixel coordinates
                })

                instance_id += 1

        print(f"成功提取 {len(self.extracted_instances)} 个Disease草莓实例")

    def _check_overlap(self, bbox1, bbox2, threshold=0.1):
        """Return True when two (x, y, w, h) boxes overlap by more than `threshold`.

        The overlap ratio is intersection area divided by the SMALLER box's area.
        NOTE(review): divides by min(area1, area2) — a zero-area box would raise
        ZeroDivisionError. Current callers only pass boxes with positive w/h;
        confirm that invariant holds.
        """
        x1, y1, w1, h1 = bbox1
        x2, y2, w2, h2 = bbox2

        # Intersection extents (clamped to zero when disjoint).
        overlap_x = max(0, min(x1 + w1, x2 + w2) - max(x1, x2))
        overlap_y = max(0, min(y1 + h1, y2 + h2) - max(y1, y2))
        overlap_area = overlap_x * overlap_y

        # Areas of both boxes.
        area1 = w1 * h1
        area2 = w2 * h2

        # Ratio of intersection to the smaller box.
        overlap_ratio = overlap_area / min(area1, area2)

        return overlap_ratio > threshold

    def _find_non_overlapping_position_near_center(self, instance_size, existing_bboxes, bg_size, max_attempts=50):
        """Randomly search for a collision-free placement, biased toward the image center.

        Args:
            instance_size: (w, h) of the instance to place.
            existing_bboxes: (x, y, w, h) boxes already occupied on the background.
            bg_size: (w, h) of the background image.
            max_attempts: number of random candidate positions to try.

        Returns:
            ((x, y), bbox) on success, or (None, None) if no free spot was found.

        NOTE(review): random.randint(a, b) raises ValueError when a > b, which
        can happen here if the instance is wider/taller than the center region
        or the margin-bounded area. Callers do not guard against this; confirm
        instances are always small relative to the background.
        """
        inst_w, inst_h = instance_size
        bg_w, bg_h = bg_size

        # Keep a margin of 10% of the background size away from the edges.
        margin_w = int(bg_w * 0.1)
        margin_h = int(bg_h * 0.1)

        # Central region spanning 30% of the background's width/height.
        center_region_w = int(bg_w * 0.3)
        center_region_h = int(bg_h * 0.3)

        # Bounds of the central region.
        center_left = (bg_w - center_region_w) // 2
        center_right = center_left + center_region_w
        center_top = (bg_h - center_region_h) // 2
        center_bottom = center_top + center_region_h

        # Sample candidate positions, preferring the central region.
        for attempt in range(max_attempts):
            # First 70% of attempts sample inside the central region...
            if attempt < max_attempts * 0.7:
                pos_x = random.randint(
                    max(center_left, margin_w),
                    min(center_right - inst_w, bg_w - inst_w - margin_w)
                )
                pos_y = random.randint(
                    max(center_top, margin_h),
                    min(center_bottom - inst_h, bg_h - inst_h - margin_h)
                )
            else:  # ...the remaining 30% sample anywhere inside the margins
                pos_x = random.randint(margin_w, max(margin_w, bg_w - inst_w - margin_w))
                pos_y = random.randint(margin_h, max(margin_h, bg_h - inst_h - margin_h))

            # Candidate box for the instance at this position.
            current_bbox = (pos_x, pos_y, inst_w, inst_h)

            # Reject the candidate if it overlaps any existing box.
            overlaps = False
            for bbox in existing_bboxes:
                if self._check_overlap(current_bbox, bbox):
                    overlaps = True
                    break

            if not overlaps:
                return (pos_x, pos_y), current_bbox

        # No collision-free position found within max_attempts.
        return None, None

    def _get_existing_bboxes_from_labels(self, label_path, bg_w, bg_h):
        """Parse a YOLO-segmentation label file into padded pixel bounding boxes.

        Returns a list of (x, y, w, h) boxes — each grown by 10 px — for every
        polygon in the file, or an empty list if the file does not exist.
        """
        existing_bboxes = []

        if not label_path.exists():
            return existing_bboxes

        with open(label_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        for line in lines:
            parts = line.strip().split()
            if len(parts) < 3:  # need at least a class id and one point
                continue

            # Parse the polygon points (normalized).
            points = [float(x) for x in parts[1:]]

            # Convert normalized coordinates to pixel coordinates.
            pixel_points = []
            for j in range(0, len(points), 2):
                if j + 1 < len(points):
                    x = int(points[j] * bg_w)
                    y = int(points[j + 1] * bg_h)
                    pixel_points.append((x, y))

            if len(pixel_points) < 3:
                continue

            # Axis-aligned bounding box of the polygon, clamped to the image.
            x_coords = [p[0] for p in pixel_points]
            y_coords = [p[1] for p in pixel_points]
            x_min = max(0, min(x_coords))
            y_min = max(0, min(y_coords))
            x_max = min(bg_w, max(x_coords))
            y_max = min(bg_h, max(y_coords))

            w = x_max - x_min
            h = y_max - y_min

            if w > 0 and h > 0:
                # Pad the box so new instances keep some distance from existing ones.
                padding = 10
                x_min = max(0, x_min - padding)
                y_min = max(0, y_min - padding)
                w = min(bg_w - x_min, w + 2 * padding)
                h = min(bg_h - y_min, h + 2 * padding)

                existing_bboxes.append((x_min, y_min, w, h))

        return existing_bboxes

    def _apply_transforms_to_polygon(self, relative_points, orig_w, orig_h, transform_matrix, new_w, new_h):
        """Map crop-relative polygon points through a 3x3 homogeneous transform.

        Args:
            relative_points: (rx, ry) pairs normalized to the original crop.
            orig_w, orig_h: original crop size in pixels.
            transform_matrix: 3x3 matrix accumulated from the scale/rotation steps.
            new_w, new_h: size in pixels of the transformed instance image.

        Returns:
            List of points normalized to the transformed image size.
        """
        transformed_points = []

        for rx, ry in relative_points:
            # Relative -> pixel coordinates in the original crop.
            px = rx * orig_w
            py = ry * orig_h

            # Apply the homogeneous transform.
            point = np.array([px, py, 1])
            transformed_point = np.dot(transform_matrix, point)
            tx, ty = transformed_point[0], transformed_point[1]

            # Pixel -> relative coordinates in the transformed image.
            nrx = tx / new_w
            nry = ty / new_h

            transformed_points.append((nrx, nry))

        return transformed_points

    def _analyze_background_instance_sizes(self, bg_labels_path, bg_w, bg_h):
        """Collect the pixel (w, h) of every annotated instance in a background image.

        Returns an empty list if the label file does not exist; used to size
        pasted instances consistently with the background's existing ones.
        """
        instance_sizes = []

        if not bg_labels_path.exists():
            return instance_sizes

        with open(bg_labels_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        for line in lines:
            parts = line.strip().split()
            if len(parts) < 3:  # need at least a class id and one point
                continue

            # Parse the polygon points (normalized).
            points = [float(x) for x in parts[1:]]

            # Convert normalized coordinates to pixel coordinates.
            pixel_points = []
            for j in range(0, len(points), 2):
                if j + 1 < len(points):
                    x = int(points[j] * bg_w)
                    y = int(points[j + 1] * bg_h)
                    pixel_points.append((x, y))

            if len(pixel_points) < 3:
                continue

            # Axis-aligned bounding box of the polygon, clamped to the image.
            x_coords = [p[0] for p in pixel_points]
            y_coords = [p[1] for p in pixel_points]
            x_min = max(0, min(x_coords))
            y_min = max(0, min(y_coords))
            x_max = min(bg_w, max(x_coords))
            y_max = min(bg_h, max(y_coords))

            w = x_max - x_min
            h = y_max - y_min

            if w > 0 and h > 0:
                instance_sizes.append((w, h))

        return instance_sizes

    def _apply_controlled_transforms_with_matrix(self, instance_img, target_width=None, target_height=None):
        """Randomly scale/rotate/brighten an RGBA instance, tracking the geometry transform.

        Args:
            instance_img: BGRA instance crop.
            target_width, target_height: desired size; when both are given, the
                scale factor is chosen to fit while preserving aspect ratio,
                instead of being drawn at random.

        Returns:
            (transformed image, 3x3 transform matrix covering scale + rotation).

        NOTE(review): rotation keeps the original canvas size, so corners of a
        rotated instance can be clipped; brightness is intentionally not part
        of the matrix (it moves no points).
        """
        h, w = instance_img.shape[:2]
        transform_matrix = np.eye(3)  # identity matrix

        # Prefer the target size when one is supplied.
        if target_width is not None and target_height is not None:
            scale_x = target_width / w
            scale_y = target_height / h
            # Use the smaller ratio so the aspect ratio is preserved.
            scale_factor = min(scale_x, scale_y)
        else:
            # Otherwise pick a small random scale.
            scale_factor = random.uniform(0.15, 0.25)  # deliberately narrow range

        new_w = int(w * scale_factor)
        new_h = int(h * scale_factor)

        if new_w > 0 and new_h > 0:
            instance_img = cv2.resize(instance_img, (new_w, new_h), interpolation=cv2.INTER_AREA)
            # Fold the scaling into the transform matrix.
            scale_matrix = np.array([
                [scale_factor, 0, 0],
                [0, scale_factor, 0],
                [0, 0, 1]
            ])
            transform_matrix = np.dot(scale_matrix, transform_matrix)
            w, h = new_w, new_h

        # Random rotation (50% chance, small angle).
        if random.random() > 0.5:
            angle = random.uniform(-15, 15)  # deliberately small rotation range
            center = (w // 2, h // 2)
            rotation_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)

            # Rotate colour and alpha separately to preserve transparency.
            instance_rgb = instance_img[:, :, :3]
            instance_alpha = instance_img[:, :, 3]

            rotated_rgb = cv2.warpAffine(instance_rgb, rotation_matrix, (w, h),
                                         flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)
            rotated_alpha = cv2.warpAffine(instance_alpha, rotation_matrix, (w, h),
                                           flags=cv2.INTER_AREA, borderMode=cv2.BORDER_CONSTANT)

            instance_img = np.dstack((rotated_rgb, rotated_alpha))

            # Promote the 2x3 affine matrix to 3x3 homogeneous form.
            rotation_matrix_3x3 = np.eye(3)
            rotation_matrix_3x3[:2, :] = rotation_matrix
            transform_matrix = np.dot(rotation_matrix_3x3, transform_matrix)

        # Random brightness jitter (50% chance, mild).
        if random.random() > 0.5:
            brightness = random.uniform(0.9, 1.1)  # narrowed from the original 0.8-1.2
            instance_rgb = instance_img[:, :, :3]
            instance_alpha = instance_img[:, :, 3:]

            instance_rgb = np.clip(instance_rgb * brightness, 0, 255).astype(np.uint8)
            instance_img = np.concatenate([instance_rgb, instance_alpha], axis=2)

        return instance_img, transform_matrix

    def create_padded_images(self, num_padded=None):
        """Composite Disease instances onto backgrounds and merge annotations.

        For each output image: pick a background (cycled in order), consume 1-2
        not-yet-used instances, transform them toward the background's average
        instance size, place them avoiding overlaps, alpha-blend them in, and
        write the image plus the union of old and new annotations.

        Args:
            num_padded: number of padded images to create; defaults to one per
                background image, capped by the number of available instances.
        """
        if num_padded is None:
            # Default: one padded image per background image.
            num_padded = len(self.background_files)

        print(f"开始创建 {num_padded} 张填充图像...")
        print(f"每张图像Disease实例数量范围: 1-2")
        print(f"实例大小比例: 0.8")

        if len(self.extracted_instances) == 0:
            print("错误: 没有可用的实例，请先运行 extract_instances()")
            return

        if len(self.background_files) == 0:
            print("错误: 没有可用的背景图片")
            return

        # Work on a shuffled copy so each instance is used at most once.
        available_instances = self.extracted_instances.copy()
        random.shuffle(available_instances)  # randomize consumption order

        # Upper bound on how many images can be created (>= 1 instance each).
        max_possible_images = len(available_instances) // 1
        if num_padded > max_possible_images:
            print(f"警告: 实例数量不足，最多只能创建 {max_possible_images} 张不重复的填充图像")
            num_padded = max_possible_images

        created_count = 0
        for pad_id in tqdm(range(num_padded), desc="创建填充图像"):
            # Stop early if the instance pool is exhausted.
            if len(available_instances) < 1:
                print(f"实例不足，停止创建填充图像。已创建 {created_count} 张图像")
                break

            # Select the background image in order (cycled).
            bg_idx = pad_id % len(self.background_files)
            bg_image_file = self.background_files[bg_idx]
            bg_img_path = self.background_path / bg_image_file
            bg_img = cv2.imread(str(bg_img_path))

            if bg_img is None:
                print(f"错误: 无法读取背景图像 {bg_image_file}")
                continue

            bg_h, bg_w = bg_img.shape[:2]

            # Working copies: composited image and a union mask of pasted instances.
            padded_img = bg_img.copy()
            padded_mask = np.zeros((bg_h, bg_w), dtype=np.uint8)

            # The background's label file is expected in a sibling "labels" dir
            # next to background_path — TODO confirm this layout with callers.
            bg_label_file = bg_img_path.stem + ".txt"
            bg_labels_path = self.background_path.parent / "labels" / bg_label_file

            # Preserve the background's existing annotation lines.
            existing_annotations = []
            if bg_labels_path.exists():
                with open(bg_labels_path, 'r', encoding='utf-8') as f:
                    existing_annotations = [line.strip() for line in f.readlines()]

            # Boxes already occupied on the background (padded).
            existing_bboxes = self._get_existing_bboxes_from_labels(bg_labels_path, bg_w, bg_h)

            # Estimate the average size of the background's existing instances.
            bg_instance_sizes = self._analyze_background_instance_sizes(bg_labels_path, bg_w, bg_h)
            if bg_instance_sizes:
                avg_bg_width = np.mean([size[0] for size in bg_instance_sizes])
                avg_bg_height = np.mean([size[1] for size in bg_instance_sizes])
            else:
                avg_bg_width = bg_w * 0.15  # fallback default
                avg_bg_height = bg_h * 0.15  # fallback default

            new_annotations = []  # annotation lines for newly pasted Disease instances

            # Randomly choose how many instances to paste (1-2, pool permitting).
            num_instances = random.randint(1, min(2, len(available_instances)))

            # Take the instances from the front of the shuffled pool...
            selected_instances = available_instances[:num_instances]
            # ...and remove them so they are never reused.
            available_instances = available_instances[num_instances:]

            # Place larger instances first (sort by bbox area, descending).
            selected_instances.sort(key=lambda x: x['bbox'][2] * x['bbox'][3], reverse=True)

            successful_placements = 0
            for instance in selected_instances:
                # Reload the saved RGBA crop from disk.
                instance_img = cv2.imread(str(instance['path']), cv2.IMREAD_UNCHANGED)
                if instance_img is None:
                    continue

                # Transform toward 80% of the background's average instance size.
                instance_img, transform_matrix = self._apply_controlled_transforms_with_matrix(
                    instance_img,
                    target_width=avg_bg_width * 0.8,
                    target_height=avg_bg_height * 0.8
                )
                inst_h, inst_w = instance_img.shape[:2]

                # Apply the same geometric transform to the polygon points.
                transformed_relative_points = self._apply_transforms_to_polygon(
                    instance['relative_points'],
                    instance['bbox'][2],  # original crop width
                    instance['bbox'][3],  # original crop height
                    transform_matrix,
                    inst_w, inst_h  # transformed width and height
                )

                # Search for a collision-free, center-biased position.
                position, bbox = self._find_non_overlapping_position_near_center(
                    (inst_w, inst_h), existing_bboxes, (bg_w, bg_h)
                )

                if position is None:
                    # Fallback: shrink the instance and retry once.
                    scale_factor = 0.7  # shrink to 70%
                    new_w = int(inst_w * scale_factor)
                    new_h = int(inst_h * scale_factor)

                    # Only retry if the shrunken size is still > 5% of the background.
                    if new_w > 0 and new_h > 0 and new_w > bg_w * 0.05 and new_h > bg_h * 0.05:
                        instance_img = cv2.resize(instance_img, (new_w, new_h), interpolation=cv2.INTER_AREA)
                        inst_h, inst_w = instance_img.shape[:2]

                        # Fold the extra shrink into the transform and redo the points.
                        scale_matrix = np.array([
                            [scale_factor, 0, 0],
                            [0, scale_factor, 0],
                            [0, 0, 1]
                        ])
                        transform_matrix = np.dot(scale_matrix, transform_matrix)
                        transformed_relative_points = self._apply_transforms_to_polygon(
                            instance['relative_points'],
                            instance['bbox'][2],
                            instance['bbox'][3],
                            transform_matrix,
                            inst_w, inst_h
                        )

                        # Second (and last) placement attempt.
                        position, bbox = self._find_non_overlapping_position_near_center(
                            (inst_w, inst_h), existing_bboxes, (bg_w, bg_h)
                        )

                if position is None:
                    # Still no room — skip this instance (it stays consumed).
                    continue

                pos_x, pos_y = position

                # Split the instance into colour and normalized alpha.
                instance_rgb = instance_img[:, :, :3]
                instance_alpha = instance_img[:, :, 3] / 255.0

                # Binary mask of the instance's opaque area.
                instance_binary_mask = (instance_alpha > 0.5).astype(np.uint8) * 255

                # Alpha-blend the instance into the background, channel by channel.
                for c in range(3):
                    padded_img[pos_y:pos_y + inst_h, pos_x:pos_x + inst_w, c] = \
                        instance_rgb[:, :, c] * instance_alpha + \
                        padded_img[pos_y:pos_y + inst_h, pos_x:pos_x + inst_w, c] * (1 - instance_alpha)

                # Merge into the running union mask.
                padded_mask[pos_y:pos_y + inst_h, pos_x:pos_x + inst_w] = \
                    np.maximum(padded_mask[pos_y:pos_y + inst_h, pos_x:pos_x + inst_w],
                               instance_binary_mask)

                # Convert the transformed polygon to background-normalized coords.
                transformed_points = []
                for rx, ry in transformed_relative_points:
                    # Relative -> pixel coordinates inside the instance image.
                    px = rx * inst_w
                    py = ry * inst_h

                    # Offset by the placement position on the background.
                    bx = px + pos_x
                    by = py + pos_y

                    # Normalize to the background size.
                    nx = bx / bg_w
                    ny = by / bg_h

                    # Clamp into the valid [0, 1] range.
                    nx = max(0, min(1, nx))
                    ny = max(0, min(1, ny))

                    transformed_points.append((nx, ny))

                # Emit a YOLO-segmentation annotation line for the pasted instance.
                yolo_annotation = f"{instance['class_id']}"
                for px, py in transformed_points:
                    yolo_annotation += f" {px:.6f} {py:.6f}"

                new_annotations.append(yolo_annotation)
                existing_bboxes.append(bbox)  # reserve the spot for later placements
                successful_placements += 1

            # Save the composited image under the background's original filename.
            pad_image_filename = bg_image_file  # reuse the background's filename
            pad_image_path = self.padded_images_dir / pad_image_filename
            cv2.imwrite(str(pad_image_path), padded_img)

            # Merge the background's original annotations with the new ones.
            all_annotations = existing_annotations + new_annotations

            # Save the label file under the background's stem.
            pad_label_filename = bg_img_path.stem + ".txt"  # matching label filename
            pad_label_path = self.padded_labels_dir / pad_label_filename

            with open(pad_label_path, 'w', encoding='utf-8') as f:
                for annotation in all_annotations:
                    f.write(annotation + '\n')

            created_count += 1

        print(f"填充完成！共创建 {created_count} 张填充图像")
        print(f"剩余未使用的实例: {len(available_instances)}")

    def get_statistics(self):
        """Print a summary: extracted instances per class, padded-image and background counts."""
        print("\n=== 数据填充统计 ===")
        print(f"提取的Disease草莓实例数量: {len(self.extracted_instances)}")

        # Count instances per class name.
        class_counts = {}
        for instance in self.extracted_instances:
            class_name = instance['class_name']
            class_counts[class_name] = class_counts.get(class_name, 0) + 1

        print("按类别统计的实例数量:")
        for class_name, count in class_counts.items():
            print(f"  {class_name}: {count}")

        pad_images = [
            f for f in os.listdir(self.padded_images_dir)
            if f.lower().endswith(('.png', '.jpg', '.jpeg'))
        ]
        print(f"创建的填充图像数量: {len(pad_images)}")

        print(f"背景图片数量: {len(self.background_files)}")

        # Theoretical maximum assuming at least one instance per image.
        max_possible_images = len(self.extracted_instances) // 1  # assumes >= 1 instance per image
        print(f"理论上最多能创建 {max_possible_images} 张不重复的填充图像")

    def __call__(self, num_padded=None):
        """Run the full pipeline: extract, composite, and report statistics.

        Args:
            num_padded: forwarded to create_padded_images() (None = all backgrounds).
        """
        print(f"固定随机种子为 {self.seed}")
        print("开始草莓数据填充 ...")

        # 1. Extract strawberry instances (Disease class only).
        self.extract_instances()

        # 2. Create the composited (padded) images.
        self.create_padded_images(num_padded=num_padded)

        # 3. Report statistics.
        self.get_statistics()

        print("任务完成")


def main():
    """CLI entry point: parse arguments and run the strawberry padding pipeline.

    Positional args: project_path, output_path, background_path.
    Optional: --num_padded (how many padded images; default = all backgrounds),
    --seed (RNG seed; default 42, matching the class default).
    """
    parser = argparse.ArgumentParser(description='草莓数据填充')
    parser.add_argument('project_path', type=str, help='原始项目路径（包含images/、labels/、notes.json）')
    parser.add_argument('output_path', type=str, help='输出路径')
    parser.add_argument('background_path', type=str, help='背景图片路径')
    parser.add_argument('--num_padded', type=int, default=None,
                        help='要创建的填充图像数量（默认：所有背景图片）')
    # Previously the constructor's seed parameter was not reachable from the
    # CLI; expose it so runs can be varied/reproduced without editing code.
    parser.add_argument('--seed', type=int, default=42,
                        help='随机种子（默认：42）')

    args = parser.parse_args()

    padding = StrawberryPadding(
        project_path=args.project_path,
        output_path=args.output_path,
        background_path=args.background_path,
        seed=args.seed
    )
    padding(num_padded=args.num_padded)


if __name__ == "__main__":
    main()