import concurrent.futures
import datetime
import hashlib
import json
import os
from typing import Any, Dict, List, Optional, Tuple, Union

import cv2
import numpy as np
import torch

# 导入GDAL相关模块
try:
    from osgeo import gdal, ogr, osr
    gdal.UseExceptions()  # 提前设置异常处理模式
except ImportError:
    print("错误: 未找到GDAL模块，请确保已正确安装GDAL")
    raise

# 从配置文件导入性能优化配置和分幅配置
from config import PERFORMANCE_OPTIMIZATION, IMAGE_TILING, IMAGE_PYRAMID

# Probe for OpenCV's CUDA module so image processing can use the GPU when available.
has_cv2_cuda = False
try:
    cv2.cuda
    has_cv2_cuda = True
    print("OpenCV CUDA模块已加载，将使用GPU加速图像处理")
except AttributeError:
    print("OpenCV CUDA模块不可用，使用CPU进行图像处理")

# Make GDAL accept non-ASCII (e.g. Chinese) file paths and attribute encodings.
gdal.SetConfigOption('GDAL_FILENAME_IS_UTF8', 'YES')
gdal.SetConfigOption('SHAPE_ENCODING', 'UTF-8')

# GDAL parallelism and raster cache tuning.
gdal.SetConfigOption('GDAL_NUM_THREADS', 'ALL_CPUS')
gdal.SetConfigOption('GDAL_CACHEMAX', '1024')  # cache size in MB (1 GB)

# Opportunistically enable GPU acceleration for drivers that support it.
# NOTE(review): SetConfigOption does not normally raise; the guard is kept
# defensively, but narrowed from a bare `except:` (which would also swallow
# KeyboardInterrupt/SystemExit) to `except Exception:`.
try:
    gdal.SetConfigOption('GDAL_USE_CUDA', 'YES')
except Exception:
    pass


# Optional progress-bar support.
has_tqdm = False
try:
    from tqdm import tqdm
    has_tqdm = True
except ImportError:
    print("警告: 未找到tqdm库，将使用简单的进度提示")



def _process_single_tile(dataset, tile_size: Tuple[int, int], geo_transform: Tuple,                         i: int, j: int, width: int, height: int, max_retries: int = 3) -> Tuple[np.ndarray, Tuple] or None:
    """
    处理单个分幅的读取和预处理
    
    Args:
        dataset: GDAL数据集对象
        tile_size: 分幅大小
        geo_transform: 原始地理变换参数
        i: 当前分幅的行索引
        j: 当前分幅的列索引
        width: 原始影像宽度
        height: 原始影像高度
        max_retries: 遇到解码错误时的最大重试次数
        
    Returns:
        Tuple[np.ndarray, Tuple]: 分幅数据和地理变换参数，如果处理失败返回None
    """
    
    # 计算当前分幅的左上角和右下角坐标
    x_off = j * tile_size[0]
    y_off = i * tile_size[1]
    
    # 确保不超出图像边界
    curr_tile_width = min(tile_size[0], width - x_off)
    curr_tile_height = min(tile_size[1], height - y_off)
    
    # 尝试多次读取分幅数据
    for retry in range(max_retries):
        try:
            # 读取当前分幅的数据
            bands = []
            for band_idx in range(min(3, dataset.RasterCount)):
                band = dataset.GetRasterBand(band_idx + 1)
                
                # 对于ZIPDecode错误，尝试设置不同的读取参数
                if retry > 0:
                    # 禁用并行读取，避免多线程冲突
                    gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'EMPTY_DIR')
                    # 减小读取块大小，避免大内存操作
                    band.SetBlockSize(512, 512)
                    
                band_data = band.ReadAsArray(x_off, y_off, curr_tile_width, curr_tile_height)
                bands.append(band_data)
            
            # 组合波段
            if len(bands) == 1:
                tile_data = np.stack([bands[0], bands[0], bands[0]], axis=2)
            else:
                tile_data = np.stack(bands, axis=2)
            
            # 创建当前分幅的地理变换参数
            tile_geo_transform = list(geo_transform)
            tile_geo_transform[0] += x_off * geo_transform[1]
            tile_geo_transform[3] += y_off * geo_transform[5]
            
            # 读取成功后恢复原始配置
            if retry > 0:
                gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', None)
            
            return tile_data, tuple(tile_geo_transform)
        except Exception as e:
            if "ZIPDecode" in str(e):
                # 特别处理ZIPDecode错误
                if retry < max_retries - 1:
                    print(f"处理分幅 ({i}, {j}) 时出错: {e}，第 {retry + 1}/{max_retries} 次重试...")
                    # 短暂暂停后重试
                    import time
                    time.sleep(0.5)
                    continue
                else:
                    print(f"处理分幅 ({i}, {j}) 时出错: {e}，已达最大重试次数")
                    return None
            else:
                # 其他错误直接返回失败
                print(f"处理分幅 ({i}, {j}) 时出错: {e}")
                return None


def tile_remote_sensing_image(dataset, tile_size: Tuple[int, int]) -> List[Tuple[np.ndarray, Tuple[float, float, float, float, float, float]]]:
    """
    Split a remote-sensing image into tiles using a thread pool.

    Args:
        dataset: GDAL dataset object.
        tile_size: Tile size as (width, height) in pixels.

    Returns:
        List[Tuple[np.ndarray, Tuple]]: Tile pixel data and the matching
        geo-transform for each tile, in row-major order.
    """
    # Image dimensions and geo-transform of the source dataset.
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    geo_transform = dataset.GetGeoTransform()

    # Number of tiles in each direction (ceiling division).
    tile_width, tile_height = tile_size
    num_tiles_x = (width + tile_width - 1) // tile_width
    num_tiles_y = (height + tile_height - 1) // tile_height

    total_tiles = num_tiles_x * num_tiles_y
    print(f"将影像分割成 {total_tiles} 个分幅 ({num_tiles_x}x{num_tiles_y})")

    # Worker count from the performance config (None lets the pool decide).
    max_workers = PERFORMANCE_OPTIMIZATION['async_processing'].get('max_workers')

    # Row/column index of every tile to produce.
    tile_tasks = [(i, j) for i in range(num_tiles_y) for j in range(num_tiles_x)]

    # Results carry their (row, col) index so order can be restored later.
    indexed_tiles = []

    # One progress reporter for both the tqdm and the plain-print case —
    # the original duplicated the whole collection loop per case.
    pbar = tqdm(total=total_tiles, desc="分幅处理进度", unit="tile") if has_tqdm else None
    completed = 0
    try:
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_tile = {
                executor.submit(_process_single_tile, dataset, tile_size, geo_transform, i, j, width, height): (i, j)
                for i, j in tile_tasks
            }

            for future in concurrent.futures.as_completed(future_to_tile):
                i, j = future_to_tile[future]
                try:
                    result = future.result()
                    if result is not None:
                        indexed_tiles.append((result, i, j))
                except Exception as e:
                    print(f"分幅 ({i}, {j}) 处理异常: {e}")

                # Progress is reported even for failed tiles.
                completed += 1
                if pbar is not None:
                    pbar.update(1)
                elif completed % 10 == 0 or completed == total_tiles:
                    progress_percent = (completed / total_tiles) * 100
                    print(f"分幅处理进度: {completed}/{total_tiles} ({progress_percent:.1f}%)")
    finally:
        if pbar is not None:
            pbar.close()

    # Restore row-major order (as_completed yields in completion order).
    indexed_tiles.sort(key=lambda x: (x[1], x[2]))
    tiles = [tile[0] for tile in indexed_tiles]

    print(f"成功处理 {len(tiles)} 个分幅")

    return tiles


def _get_tiles_dir_for_image(image_path: str) -> str:
    """
    Return the directory used to cache tiles for the given image.

    A short MD5 hash of the file name keeps tiles from different images in
    separate subdirectories of IMAGE_TILING['tiles_dir'].

    Args:
        image_path: Path to the image file.

    Returns:
        str: Directory path for this image's cached tiles.
    """
    # `os` and `hashlib` are imported at module level; the original's
    # redundant function-local imports were removed.
    file_name = os.path.basename(image_path)
    file_hash = hashlib.md5(file_name.encode()).hexdigest()[:8]
    return os.path.join(IMAGE_TILING['tiles_dir'], file_hash)

def _load_existing_tiles(image_path: str, tile_size: Tuple[int, int], pyramid_level: int = 0) -> Tuple[List[Tuple[np.ndarray, Tuple]], Dict[str, Any]]:
    """
    Try to load previously saved tiling results for an image.

    Args:
        image_path: Path to the image file.
        tile_size: Requested tile size (width, height).
        pyramid_level: Pyramid level the tiles were generated from.

    Returns:
        (tiles, geo_info) where tiles is a list of (tile_data, geo_transform),
        or (None, None) when no usable cached tiles exist.
    """
    # os / numpy / cv2 / json are imported at module level; the original's
    # redundant function-local imports were removed.

    # Directory holding the cached tiles for this image.
    tiles_dir = _get_tiles_dir_for_image(image_path)

    # Pyramid levels are cached in per-level subdirectories.
    if pyramid_level > 0:
        tiles_dir = os.path.join(tiles_dir, f'level_{pyramid_level}')

    # Without metadata there is nothing usable to load.
    metadata_path = os.path.join(tiles_dir, 'metadata.json')
    if not os.path.exists(metadata_path):
        return None, None

    try:
        with open(metadata_path, 'r', encoding='utf-8') as f:
            metadata = json.load(f)

        # Cached tiles are only valid for the same tile size.
        if tuple(metadata['tile_size']) != tile_size:
            print(f"警告: 已存在的分幅大小({metadata['tile_size']})与当前请求的分幅大小({tile_size})不匹配")
            return None, None

        # Load every tile image and its geo-transform.
        tiles = []
        for i in range(metadata['num_tiles']):
            tile_path = os.path.join(tiles_dir, f'tile_{i}.{IMAGE_TILING["tile_image_format"]}')
            geo_path = os.path.join(tiles_dir, f'tile_{i}_geo.json')

            if not os.path.exists(tile_path) or not os.path.exists(geo_path):
                print(f"警告: 缺少分幅文件 tile_{i}")
                return None, None

            tile_data = cv2.imread(tile_path)
            if tile_data is None:
                print(f"警告: 无法读取分幅图像 tile_{i}")
                return None, None

            # OpenCV loads BGR; the pipeline works in RGB.
            tile_data = cv2.cvtColor(tile_data, cv2.COLOR_BGR2RGB)

            with open(geo_path, 'r', encoding='utf-8') as f:
                geo_transform = tuple(json.load(f))

            tiles.append((tile_data, geo_transform))

        print(f"成功加载 {metadata['num_tiles']} 个已存在的分幅")

        # Rebuild the geo-info dictionary from the metadata.
        geo_info = {
            'original_width': metadata['original_width'],
            'original_height': metadata['original_height'],
            'projection': metadata['projection'],
            'geo_transform': tuple(metadata['geo_transform']),
            'is_tiled': True,
            'tile_size': tuple(metadata['tile_size']),
            'num_tiles': metadata['num_tiles']
        }

        # Carry pyramid provenance through when present.
        if 'is_pyramid' in metadata:
            geo_info['is_pyramid'] = metadata['is_pyramid']
            geo_info['pyramid_level'] = metadata['pyramid_level']
            geo_info['scale_factor'] = metadata['scale_factor']

        return tiles, geo_info
    except Exception as e:
        print(f"加载已存在的分幅时出错: {e}")
        return None, None

def get_pyramid_path(image_path: str) -> str:
    """
    Build the pyramid cache directory path for the given image.

    Args:
        image_path: Path to the image file.

    Returns:
        str: Directory under IMAGE_PYRAMID['pyramid_dir'] keyed by the first
        eight hex digits of an MD5 hash of the image's file name.
    """
    base_name = os.path.basename(image_path)
    name_digest = hashlib.md5(base_name.encode()).hexdigest()
    return os.path.join(IMAGE_PYRAMID['pyramid_dir'], name_digest[:8])


def build_image_pyramid(image_path: str) -> str:
    """
    Build an image pyramid (progressively downsampled levels) for an image.

    Args:
        image_path: Path to the source remote-sensing image.

    Returns:
        str: Directory the pyramid was written to.

    Raises:
        FileNotFoundError: If the source image does not exist.
        Exception: If GDAL cannot open the source image.
    """
    if not os.path.exists(image_path):
        raise FileNotFoundError(f"图像文件不存在: {image_path}")

    # Pyramid cache directory for this image.
    pyramid_dir = get_pyramid_path(image_path)
    os.makedirs(pyramid_dir, exist_ok=True)

    # Reuse an existing pyramid when its level count matches the config.
    metadata_path = os.path.join(pyramid_dir, 'metadata.json')
    if os.path.exists(metadata_path):
        try:
            with open(metadata_path, 'r', encoding='utf-8') as f:
                metadata = json.load(f)
            if metadata['levels'] == IMAGE_PYRAMID['levels']:
                print(f"金字塔数据已存在于: {pyramid_dir}")
                return pyramid_dir
        except Exception:
            # Corrupt or partial metadata: fall through and rebuild.
            pass

    print(f"开始为图像 {image_path} 构建金字塔")

    dataset = gdal.Open(image_path)
    if dataset is None:
        raise Exception(f"无法打开图像文件: {image_path}")

    # Source image properties shared by every level.
    original_width = dataset.RasterXSize
    original_height = dataset.RasterYSize
    projection = dataset.GetProjection()
    geo_transform = dataset.GetGeoTransform()

    # Build each level, halving the resolution per level.
    for level in range(1, IMAGE_PYRAMID['levels'] + 1):
        scale_factor = 1.0 / (2 ** level)
        new_width = max(1, int(original_width * scale_factor))
        new_height = max(1, int(original_height * scale_factor))

        print(f"构建金字塔层级 {level}: {new_width}x{new_height}")

        level_path = os.path.join(pyramid_dir, f'level_{level}.tif')

        driver = gdal.GetDriverByName('GTiff')

        # GeoTIFF creation options from the pyramid configuration.
        create_options = [
            f'COMPRESS={IMAGE_PYRAMID["overviews_config"]["compress_format"]}',
            'TILED=YES',
            'BIGTIFF=IF_SAFER'
        ]

        if IMAGE_PYRAMID['overviews_config']['compress_format'] == 'JPEG':
            create_options.append(f'QUALITY={IMAGE_PYRAMID["overviews_config"]["quality"]}')

        out_dataset = driver.Create(
            level_path,
            new_width,
            new_height,
            dataset.RasterCount,
            dataset.GetRasterBand(1).DataType,
            options=create_options
        )

        # Coarser levels cover the same extent with bigger pixels, so the
        # pixel-size terms grow by 1/scale_factor.
        out_dataset.SetProjection(projection)
        new_geo_transform = list(geo_transform)
        new_geo_transform[1] /= scale_factor
        new_geo_transform[5] /= scale_factor
        out_dataset.SetGeoTransform(new_geo_transform)

        # Resampling method from config, defaulting to bilinear.
        resampling_method = {
            'NEAREST': gdal.GRA_NearestNeighbour,
            'BILINEAR': gdal.GRA_Bilinear,
            'CUBIC': gdal.GRA_Cubic,
            'CUBICSPLINE': gdal.GRA_CubicSpline,
            'LANCZOS': gdal.GRA_Lanczos
        }.get(IMAGE_PYRAMID['overviews_config']['resampling_method'], gdal.GRA_Bilinear)

        gdal.ReprojectImage(
            dataset,
            out_dataset,
            projection,
            projection,
            resampling_method
        )

        # Flush and close this level.
        out_dataset = None

    # Close the source dataset.
    dataset = None

    # BUGFIX: write metadata only AFTER every level was built. The original
    # wrote it up front, so an interrupted build left a "complete" marker and
    # later calls skipped rebuilding a half-finished pyramid.
    metadata = {
        'original_image_path': image_path,
        'original_width': original_width,
        'original_height': original_height,
        'projection': projection,
        'geo_transform': list(geo_transform),
        'levels': IMAGE_PYRAMID['levels'],
        'timestamp': datetime.datetime.now().isoformat()
    }
    with open(metadata_path, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)

    print(f"金字塔构建完成，保存至: {pyramid_dir}")
    return pyramid_dir


def read_pyramid_level(image_path: str, level: int = 0) -> Tuple[gdal.Dataset, Dict[str, Any]]:
    """
    Open the dataset for a given pyramid level.

    Args:
        image_path: Path to the original image.
        level: Pyramid level (0 = original resolution).

    Returns:
        Tuple[gdal.Dataset, Dict[str, Any]]: The opened dataset and its
        geo-information dictionary.

    Raises:
        ValueError: If the level is outside the configured range.
        FileNotFoundError: If the original image cannot be opened.
        Exception: If a pyramid level file cannot be opened.
    """
    # Validate the requested level against the configured range.
    if level < IMAGE_PYRAMID['min_level'] or level > IMAGE_PYRAMID['max_level']:
        raise ValueError(f"金字塔层级必须在 {IMAGE_PYRAMID['min_level']} 到 {IMAGE_PYRAMID['max_level']} 之间")

    # Level 0 is the original image itself.
    if level == 0:
        dataset = gdal.Open(image_path)
        if dataset is None:
            raise FileNotFoundError(f"无法打开图像文件: {image_path}")

        geo_info = {
            'original_width': dataset.RasterXSize,
            'original_height': dataset.RasterYSize,
            'projection': dataset.GetProjection(),
            'geo_transform': dataset.GetGeoTransform(),
            'is_pyramid': False,
            'pyramid_level': 0
        }
        return dataset, geo_info

    # Clamp to the deepest configured level.
    level = min(level, IMAGE_PYRAMID['levels'])

    # Build the pyramid on demand if the level file is missing.
    pyramid_dir = get_pyramid_path(image_path)
    if not os.path.exists(os.path.join(pyramid_dir, f'level_{level}.tif')):
        pyramid_dir = build_image_pyramid(image_path)

    level_path = os.path.join(pyramid_dir, f'level_{level}.tif')
    dataset = gdal.Open(level_path)

    if dataset is None:
        raise Exception(f"无法打开金字塔层级文件: {level_path}")

    # Default geo-info uses the level dataset's own dimensions; the original
    # dimensions are overridden from metadata when available. (The original
    # code duplicated this dict literal three times.)
    geo_info = {
        'original_width': dataset.RasterXSize,
        'original_height': dataset.RasterYSize,
        'projection': dataset.GetProjection(),
        'geo_transform': dataset.GetGeoTransform(),
        'is_pyramid': True,
        'pyramid_level': level,
        'scale_factor': 1.0 / (2 ** level)
    }

    metadata_path = os.path.join(pyramid_dir, 'metadata.json')
    if os.path.exists(metadata_path):
        try:
            with open(metadata_path, 'r', encoding='utf-8') as f:
                metadata = json.load(f)
            geo_info['original_width'] = metadata['original_width']
            geo_info['original_height'] = metadata['original_height']
        except Exception:
            # Unreadable metadata: keep the level dataset's own dimensions.
            pass

    return dataset, geo_info

def read_remote_sensing_image(image_path: str, max_size: int = 2048, use_cuda: bool = None, return_tensor: bool = None, enable_tiling: bool = None, pyramid_level: int = None) -> Tuple[Union[np.ndarray, torch.Tensor, List[Tuple[np.ndarray, Tuple]]], Dict[str, Any]]:
    """
    Read a remote-sensing image and return pixel data plus geo-information.

    Supports reading from a configured pyramid level and, for large images,
    splitting the image into tiles (with optional tile caching on disk).

    NOTE(review): although the signature mentions torch.Tensor, nothing in
    this function converts the data to a tensor — `use_cuda`/`return_tensor`
    are resolved but never used afterwards; confirm whether a caller performs
    the conversion.

    Args:
        image_path: Path to the remote-sensing image.
        max_size: Maximum width/height; larger images are downsampled when
            tiling is not applied.
        use_cuda: Whether to use CUDA (None = decide from config).
        return_tensor: Whether to return a PyTorch tensor (None = decide from
            config). Currently unused after resolution — see note above.
        enable_tiling: Whether to enable tiling (None = decide from config).
        pyramid_level: Pyramid level to read (None = config default).

    Returns:
        Tuple[Union[np.ndarray, torch.Tensor, List[Tuple[np.ndarray, Tuple]]], Dict]:
            A list of (tile_data, tile_geo_transform) when tiling is applied,
            otherwise the whole image array, together with a geo-info dict.
    """
    # Resolve CUDA usage and output type from config when unspecified.
    if use_cuda is None:
        use_cuda = PERFORMANCE_OPTIMIZATION['gpu_acceleration']['enabled'] and torch.cuda.is_available()
    
    if return_tensor is None:
        return_tensor = use_cuda
    
    # Resolve tiling behaviour from config when unspecified.
    if enable_tiling is None:
        enable_tiling = IMAGE_TILING['enabled']
    
    # Resolve the pyramid level from config when unspecified.
    if pyramid_level is None:
        pyramid_level = IMAGE_PYRAMID['default_level']
    
    # Clamp negative pyramid levels to the base level.
    if IMAGE_PYRAMID['enabled'] and pyramid_level < 0:
        pyramid_level = 0
    
    # Redundant guard: numpy is already imported unconditionally at module level.
    try:
        import numpy as np
    except ImportError:
        print("警告: 未找到numpy模块，某些功能可能受限")
        return None, {}
    
    # Read from the pyramid when enabled and a coarser level was requested.
    if IMAGE_PYRAMID['enabled'] and pyramid_level > 0:
        dataset, pyramid_geo_info = read_pyramid_level(image_path, pyramid_level)
    else:
        # Otherwise open the original image directly.
        dataset = gdal.Open(image_path)
        if dataset is None:
            raise FileNotFoundError(f"无法打开影像文件: {image_path}")
        
        # Original image dimensions.
        original_width = dataset.RasterXSize
        original_height = dataset.RasterYSize
        
        # Projection and geo-transform of the original image.
        projection = dataset.GetProjection()
        geo_transform = dataset.GetGeoTransform()
        
        pyramid_geo_info = {
            'original_width': original_width,
            'original_height': original_height,
            'projection': projection,
            'geo_transform': geo_transform,
            'is_pyramid': False,
            'pyramid_level': 0
        }
    
    # Properties of the dataset actually being read (pyramid level or original).
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    projection = dataset.GetProjection()
    geo_transform = dataset.GetGeoTransform()
    
    # Ensure original_width/original_height are defined on both branches.
    if IMAGE_PYRAMID['enabled'] and pyramid_level > 0:
        original_width = pyramid_geo_info['original_width']
        original_height = pyramid_geo_info['original_height']
    else:
        # Already set when the original image was opened above.
        pass
    
    # Tile when tiling is enabled and the ORIGINAL image exceeds the
    # configured tile size (the comparison uses the configured size even if
    # auto-sizing later picks a different tile size).
    should_tile = enable_tiling and (original_width > IMAGE_TILING['tile_size'][0] or original_height > IMAGE_TILING['tile_size'][1])
    
    if should_tile:
        # Start from the configured tile size.
        tile_size = IMAGE_TILING['tile_size']
        
        # Optionally derive the tile size from the image dimensions.
        if IMAGE_TILING['enable_auto_size']:
            # Scale by auto_size_factor, clamped to [min_tile_size, max_tile_size].
            auto_tile_width = min(int(original_width * IMAGE_TILING['auto_size_factor']), IMAGE_TILING['max_tile_size'])
            auto_tile_width = max(auto_tile_width, IMAGE_TILING['min_tile_size'])
            
            auto_tile_height = min(int(original_height * IMAGE_TILING['auto_size_factor']), IMAGE_TILING['max_tile_size'])
            auto_tile_height = max(auto_tile_height, IMAGE_TILING['min_tile_size'])
            
            # Shape the tiles according to the image aspect ratio.
            aspect_ratio = original_width / original_height if original_height > 0 else 1
            if aspect_ratio > 1.5:
                # Wide image: favour tile width.
                tile_size = (auto_tile_width, min(auto_tile_width, auto_tile_height * 2))
            elif aspect_ratio < 0.67:
                # Tall image: favour tile height.
                tile_size = (min(auto_tile_height, auto_tile_width * 2), auto_tile_height)
            else:
                # Roughly square image: use the average size.
                avg_size = int((auto_tile_width + auto_tile_height) / 2)
                tile_size = (avg_size, avg_size)
            
            print(f"自动调整分幅大小为: {tile_size[0]}x{tile_size[1]}")
        
        # Reuse previously saved tiles when allowed.
        if IMAGE_TILING['load_existing_tiles']:
            tiles, geo_info = _load_existing_tiles(image_path, tile_size, pyramid_level)
            if tiles is not None:
                # Release the GDAL dataset.
                dataset = None
                
                return tiles, geo_info
        
        print(f"准备将图像分幅为 {tile_size[0]}x{tile_size[1]}")
        
        # Split the (possibly pyramid-level) dataset into tiles.
        tiles = tile_remote_sensing_image(dataset, tile_size)
        
        # Optionally persist the tiles for later reuse.
        if IMAGE_TILING['save_tiles']:
            try:
                # Cache directory for this image's tiles.
                tiles_dir = _get_tiles_dir_for_image(image_path)
                
                # Pyramid levels get their own subdirectory.
                if pyramid_level > 0:
                    tiles_dir = os.path.join(tiles_dir, f'level_{pyramid_level}')
                
                # Create the directory if it does not exist.
                os.makedirs(tiles_dir, exist_ok=True)
                
                # Metadata describing this tiling run.
                metadata = {
                    'original_width': pyramid_geo_info['original_width'],
                    'original_height': pyramid_geo_info['original_height'],
                    'projection': projection,
                    'geo_transform': list(geo_transform),
                    'tile_size': list(tile_size),
                    'num_tiles': len(tiles),
                    'timestamp': datetime.datetime.now().isoformat()
                }
                
                # Record pyramid provenance when applicable.
                if pyramid_geo_info.get('is_pyramid', False):
                    metadata['is_pyramid'] = True
                    metadata['pyramid_level'] = pyramid_geo_info['pyramid_level']
                    metadata['scale_factor'] = pyramid_geo_info['scale_factor']
                
                metadata_path = os.path.join(tiles_dir, 'metadata.json')
                with open(metadata_path, 'w', encoding='utf-8') as f:
                    json.dump(metadata, f, ensure_ascii=False, indent=2)
                
                # Save each tile image plus its geo-transform.
                for i, (tile_data, tile_geo) in enumerate(tiles):
                    # OpenCV writes BGR; pipeline data is RGB.
                    tile_bgr = cv2.cvtColor(tile_data, cv2.COLOR_RGB2BGR)
                    
                    # Tile raster.
                    tile_path = os.path.join(tiles_dir, f'tile_{i}.{IMAGE_TILING["tile_image_format"]}')
                    cv2.imwrite(tile_path, tile_bgr)
                    
                    # Tile geo-transform as JSON.
                    geo_path = os.path.join(tiles_dir, f'tile_{i}_geo.json')
                    with open(geo_path, 'w', encoding='utf-8') as f:
                        json.dump(list(tile_geo), f)
                
                print(f"成功保存 {len(tiles)} 个分幅到目录: {tiles_dir}")
            except Exception as e:
                print(f"保存分幅时出错: {e}")
        
        # Release the GDAL dataset.
        dataset = None
        
        # Geo information describing the tiled result.
        geo_info = {
            'original_width': pyramid_geo_info['original_width'],
            'original_height': pyramid_geo_info['original_height'],
            'projection': projection,
            'geo_transform': geo_transform,
            'is_tiled': True,
            'tile_size': tile_size,
            'num_tiles': len(tiles)
        }
        
        # Record pyramid provenance when applicable.
        if pyramid_geo_info.get('is_pyramid', False):
            geo_info['is_pyramid'] = True
            geo_info['pyramid_level'] = pyramid_geo_info['pyramid_level']
            geo_info['scale_factor'] = pyramid_geo_info['scale_factor']
        
        return tiles, geo_info
    else:
        # No tiling: read the whole image, downsampling if it is too large.
        # NOTE(review): the size check and new_width/new_height are based on
        # the ORIGINAL dimensions even when `dataset` is a smaller pyramid
        # level, which would upsample the level back toward original size —
        # confirm this is the intended behaviour for pyramid reads.
        scale_factor = 1.0
        if original_width > max_size or original_height > max_size:
            scale_factor = max_size / max(original_width, original_height)
            # Target dimensions after downsampling.
            new_width = int(original_width * scale_factor)
            new_height = int(original_height * scale_factor)
            
            print(f"图像尺寸过大 ({original_width}x{original_height})，进行降采样至 {new_width}x{new_height}")
            
            # In-memory dataset to hold the resampled image.
            driver = gdal.GetDriverByName('MEM')
            resampled_dataset = driver.Create('', new_width, new_height, 3, gdal.GDT_Byte)
            
            # Scale the pixel-size terms of the geo-transform accordingly.
            resampled_dataset.SetProjection(projection)
            new_geo_transform = list(geo_transform)
            new_geo_transform[1] /= scale_factor  # pixel width grows
            new_geo_transform[5] /= scale_factor  # pixel height grows
            resampled_dataset.SetGeoTransform(new_geo_transform)
            
            # Resample into the in-memory dataset.
            gdal.ReprojectImage(
                dataset, resampled_dataset, projection, projection,
                gdal.GRA_Bilinear  # bilinear interpolation
            )
            
            # Read the resampled bands.
            bands = []
            for i in range(3):
                band = resampled_dataset.GetRasterBand(i + 1)
                band_data = band.ReadAsArray()
                bands.append(band_data)
            
            # Stack bands into an H x W x 3 array.
            image_data = np.stack(bands, axis=2)
            
            # Keep geo-transform and dimensions consistent with the resample.
            geo_transform = new_geo_transform
            width, height = new_width, new_height
        else:
            # Image is small enough: read it directly.
            width, height = original_width, original_height
            
            # Read up to the first three bands as RGB.
            bands = []
            for i in range(min(3, dataset.RasterCount)):
                band = dataset.GetRasterBand(i + 1)
                band_data = band.ReadAsArray()
                bands.append(band_data)
            
            if len(bands) == 1:
                # Replicate a single band into three channels.
                image_data = np.stack([bands[0], bands[0], bands[0]], axis=2)
            else:
                # Stack multi-band data in order.
                image_data = np.stack(bands, axis=2)
        
        # Normalise to uint8 via a linear stretch when needed.
        if image_data.dtype != np.uint8:
            min_val = image_data.min()
            max_val = image_data.max()
            if max_val > min_val:
                image_data = ((image_data - min_val) / (max_val - min_val) * 255).astype(np.uint8)
            else:
                image_data = np.zeros_like(image_data, dtype=np.uint8)
        
        # Release the GDAL dataset.
        dataset = None
        
        # Geo information for the single-array result.
        geo_info = {
            'width': width,
            'height': height,
            'projection': projection,
            'geo_transform': geo_transform,
            'is_tiled': False
        }
        
        # Record pyramid provenance when applicable.
        if pyramid_geo_info.get('is_pyramid', False):
            geo_info['is_pyramid'] = True
            geo_info['pyramid_level'] = pyramid_geo_info['pyramid_level']
            geo_info['scale_factor'] = pyramid_geo_info['scale_factor']
        
        return image_data, geo_info


def pixel_to_geo_coords(pixel_x: float, pixel_y: float, geo_transform: Tuple[float, ...]) -> Tuple[float, float]:
    """
    Convert pixel coordinates to geographic coordinates.

    The GDAL geo-transform is (x0, dx, rx, y0, ry, dy): (x0, y0) is the
    top-left corner, dx/dy are the pixel resolutions (dy usually negative),
    and rx/ry are rotation terms (usually zero).

    Args:
        pixel_x: Pixel x coordinate.
        pixel_y: Pixel y coordinate.
        geo_transform: Six-element geo-transform.

    Returns:
        Tuple[float, float]: Geographic (x, y).
    """
    x0, dx, rx, y0, ry, dy = geo_transform[:6]
    geo_x = x0 + pixel_x * dx + pixel_y * rx
    geo_y = y0 + pixel_x * ry + pixel_y * dy
    return geo_x, geo_y


def create_shapefile(output_path: str, projection: str, crs: str = None):
    """
    Create a Shapefile with a polygon layer and Area/Confidence fields.

    Args:
        output_path: Output file path.
        projection: WKT projection string.
        crs: Optional CRS identifier; takes precedence over `projection`.

    Returns:
        Tuple[ogr.DataSource, ogr.Layer]: The data source and its layer.
    """
    # Make sure the destination directory exists.
    out_dir = os.path.dirname(os.path.abspath(output_path))
    os.makedirs(out_dir, exist_ok=True)

    # Remove any previous shapefile at the same path.
    shp_driver = ogr.GetDriverByName('ESRI Shapefile')
    if os.path.exists(output_path):
        shp_driver.DeleteDataSource(output_path)

    # Create the data source.
    data_source = shp_driver.CreateDataSource(output_path)
    if data_source is None:
        raise Exception(f"无法创建Shapefile: {output_path}")

    # Spatial reference: prefer an explicit CRS identifier, else the WKT.
    srs = osr.SpatialReference()
    if crs:
        srs.SetFromUserInput(crs)
    elif projection:
        srs.ImportFromWkt(projection)

    # Polygon layer for the detected crop fields.
    layer = data_source.CreateLayer('crop_fields', srs, ogr.wkbPolygon)
    if layer is None:
        raise Exception(f"无法在Shapefile中创建图层: {output_path}")

    # Attribute fields: polygon area and detection confidence.
    field_specs = (
        ('Area', 12, 2, "无法创建面积字段"),
        ('Confidence', 6, 3, "无法创建置信度字段"),
    )
    for field_name, field_width, field_precision, error_message in field_specs:
        field_defn = ogr.FieldDefn(field_name, ogr.OFTReal)
        field_defn.SetWidth(field_width)
        field_defn.SetPrecision(field_precision)
        if layer.CreateField(field_defn) != 0:
            raise Exception(error_message)

    return data_source, layer


def mask_to_polygon(
    mask: np.ndarray, 
    geo_transform: Tuple[float, float, float, float, float, float], 
    simplify_tolerance: float = 0.5, 
    right_angle_enabled: bool = False,
    angle_tolerance: float = 15.0,
    min_segment_length: float = 5.0,
    max_iterations: int = 3
) -> List[Tuple[List[Tuple[float, float]], float]]:
    """
    Convert a binary mask to polygons in geographic coordinates.

    Args:
        mask: Binary mask array.
        geo_transform: Geo-transform parameters.
        simplify_tolerance: Contour simplification tolerance (pixels).
        right_angle_enabled: Whether to regularise edges toward right angles.
        angle_tolerance: Maximum deviation from 90 degrees (degrees).
        min_segment_length: Minimum segment length (pixels).
        max_iterations: Right-angle regularisation iterations.

    Returns:
        List[Tuple[List[Tuple[float, float]], float]]: (geo_points, area)
        per polygon; area is computed in pixel units.
    """
    polygons = []

    # numpy is imported at module level (`import numpy as np`); the original's
    # dead guarded re-import was removed.
    # Contour extraction requires an 8-bit mask.
    mask_uint8 = mask.astype(np.uint8)

    try:
        # Improve edge quality before contour extraction.
        enhanced_mask = enhance_edge_detection(mask_uint8)

        # CPU implementation: OpenCV CUDA APIs vary too much across versions.
        contours, _ = cv2.findContours(enhanced_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        for contour in contours:
            # A polygon needs at least three vertices.
            if len(contour) < 3:
                continue

            # Step 1: coarse vertex reduction with approxPolyDP.
            if simplify_tolerance > 0:
                contour = cv2.approxPolyDP(contour, simplify_tolerance, True)

            # Step 2: optional right-angle regularisation of the edges.
            if right_angle_enabled and len(contour) > 2:
                contour = make_right_angles(contour, angle_tolerance, min_segment_length, max_iterations)

            # Step 3: convert vertices from pixel to geographic coordinates.
            geo_points = []
            for point in contour:
                pixel_x, pixel_y = point[0]
                geo_x, geo_y = pixel_to_geo_coords(pixel_x, pixel_y, geo_transform)
                geo_points.append((geo_x, geo_y))

            # Area is computed in pixel space (more accurate here).
            area = cv2.contourArea(contour)

            polygons.append((geo_points, area))
    except Exception as e:
        print(f"轮廓检测失败: {e}")

    return polygons


def make_right_angles(contour, angle_tolerance=15.0, min_segment_length=5.0, max_iterations=3):
    """
    Snap near-right-angle corners of a polygon contour toward 90 degrees.

    Args:
        contour: OpenCV-style contour array of shape (N, 1, 2).
        angle_tolerance: maximum deviation from 90 degrees (in degrees) for
            a corner to be treated as "near right angle".
        min_segment_length: segments shorter than this (in pixels) are left
            untouched; also reused as the final simplification epsilon.
        max_iterations: upper bound on smoothing passes.  NOTE(review):
            every pass emits exactly one point per input point, so the
            early-exit check below always fires after the first pass and
            additional iterations never run in practice.

    Returns:
        The processed contour as an int32 array of shape (M, 1, 2); the
        input is returned unchanged when it has fewer than 3 points.
    """
    import numpy as np

    def _as_int32(arr):
        # OpenCV contour functions require CV_32S; round floats first so
        # coordinates are not truncated toward zero.
        if arr.dtype == np.int32:
            return arr
        if np.issubdtype(arr.dtype, np.floating):
            return np.round(arr).astype(np.int32)
        return arr.astype(np.int32)

    # A polygon needs at least 3 vertices; pass through anything smaller.
    if contour is None or len(contour) < 3:
        return contour

    # Work on a copy so the caller's array is never mutated.
    contour = _as_int32(np.copy(contour))

    angle_tolerance_rad = np.radians(angle_tolerance)
    target_angle_rad = np.radians(90)

    for _ in range(max_iterations):
        new_contour = []
        n = len(contour)

        for i in range(n):
            # Current vertex and its neighbours on the closed ring.
            p_prev = contour[(i - 1) % n][0]
            p_curr = contour[i][0]
            p_next = contour[(i + 1) % n][0]

            # Incoming and outgoing edge vectors and their lengths.
            vec_prev = p_curr - p_prev
            vec_next = p_next - p_curr
            len_prev = np.linalg.norm(vec_prev)
            len_next = np.linalg.norm(vec_next)

            # Segments that are too short are kept as-is.
            if len_prev < min_segment_length or len_next < min_segment_length:
                new_contour.append([p_curr])
                continue

            # Guard against zero-length vectors (only possible when
            # min_segment_length <= 0).
            if len_prev > 0 and len_next > 0:
                vec_prev_norm = vec_prev / len_prev
                vec_next_norm = vec_next / len_next

                # Angle between the two edges via the dot product; clip to
                # keep arccos in its domain despite rounding noise.
                dot_product = np.clip(np.dot(vec_prev_norm, vec_next_norm), -1.0, 1.0)
                angle = np.arccos(dot_product)

                # Snap only when the incoming edge is the shorter one: the
                # vertex is moved onto the outgoing edge's direction while
                # preserving the incoming edge's length.
                # NOTE(review): the original code also computed an adjusted
                # p_next for the len_prev >= len_next case but never applied
                # it, so that case is kept as a no-op here -- confirm whether
                # adjusting p_next was intended.
                if abs(angle - target_angle_rad) <= angle_tolerance_rad and len_prev < len_next:
                    new_p_curr = p_prev + vec_next_norm * len_prev
                    new_contour.append([new_p_curr])
                    continue

            # Not a near-right-angle corner: keep the original vertex.
            new_contour.append([p_curr])

        # Rebuild the contour (snapped vertices are float, so re-cast).
        contour = _as_int32(np.array(new_contour))

        # Every pass emits one point per input point, so this condition is
        # always true and the loop effectively runs a single pass.
        if len(contour) == n:
            break

    # Final simplification to drop redundant, nearly-collinear points that
    # the snapping may have introduced.
    try:
        if len(contour) >= 3 and contour.dtype == np.int32:
            contour = cv2.approxPolyDP(contour, min_segment_length / 2, True)
    except Exception as e:
        print(f"抽稀轮廓时出错: {e}")

    return contour


def enhance_edge_detection(mask, canny_threshold1=50, canny_threshold2=150, dilate_iterations=1, erode_iterations=1):
    """
    Pre-process a binary mask to improve subsequent contour extraction.

    The mask is denoised with erosion/dilation, its edges are re-detected
    with Canny, the detected edge contours are filled into solid regions,
    and those regions are OR-ed back onto the cleaned mask.

    Args:
        mask: binary mask array.
        canny_threshold1: lower hysteresis threshold for Canny.
        canny_threshold2: upper hysteresis threshold for Canny.
        dilate_iterations: number of dilation passes (0 disables).
        erode_iterations: number of erosion passes (0 disables).

    Returns:
        The enhanced binary mask (uint8), or the input mask unchanged when
        the required libraries are unavailable.
    """
    # Make sure the required libraries are importable.
    try:
        import cv2
        import numpy as np
    except ImportError:
        print("错误: 未找到必要的库，无法进行边缘增强")
        return mask

    # Canny and the morphology operations below require an 8-bit image.
    if mask.dtype != np.uint8:
        mask = mask.astype(np.uint8)

    # Erode first to remove small speckles, then dilate to restore the
    # surviving shapes roughly to their original extent.
    kernel = np.ones((3, 3), np.uint8)
    if erode_iterations > 0:
        mask = cv2.erode(mask, kernel, iterations=erode_iterations)
    if dilate_iterations > 0:
        mask = cv2.dilate(mask, kernel, iterations=dilate_iterations)

    # Re-detect edges, trace them as external contours, and fill them so
    # they become solid regions that can reinforce the mask.
    edge_map = cv2.Canny(mask, canny_threshold1, canny_threshold2)
    filled = np.zeros_like(mask)
    edge_contours, _ = cv2.findContours(edge_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(filled, edge_contours, -1, 255, thickness=cv2.FILLED)

    # Union of the cleaned mask and the filled edge regions.
    return cv2.bitwise_or(mask, filled)


def add_polygon_to_shapefile(layer: ogr.Layer, polygon_points: List[Tuple[float, float]], area: float, confidence: float = 1.0):
    """
    Append a polygon feature to a Shapefile layer.

    Args:
        layer: target OGR layer; expected to have 'Area' and 'Confidence'
            fields (see the SetField calls below).
        polygon_points: polygon vertices as (x, y) tuples; the ring is
            closed automatically by repeating the first vertex.
        area: value written to the 'Area' field.
        confidence: value written to the 'Confidence' field.

    Raises:
        ValueError: if polygon_points is empty.
        Exception: if the feature cannot be added to the layer.
    """
    # Guard against an empty vertex list, which previously failed with an
    # obscure IndexError when closing the ring below.
    if not polygon_points:
        raise ValueError("polygon_points must contain at least one vertex")

    # Build the outer ring and close it explicitly (a linear ring's first
    # and last vertex must coincide).
    ring = ogr.Geometry(ogr.wkbLinearRing)
    for point in polygon_points:
        ring.AddPoint(point[0], point[1])
    ring.AddPoint(polygon_points[0][0], polygon_points[0][1])

    # Wrap the ring in a polygon geometry.
    polygon = ogr.Geometry(ogr.wkbPolygon)
    polygon.AddGeometry(ring)

    # Create the feature and attach geometry plus attributes.
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(polygon)
    feature.SetField('Area', area)
    feature.SetField('Confidence', confidence)

    # CreateFeature returns a non-zero OGR error code on failure.
    if layer.CreateFeature(feature) != 0:
        raise Exception("无法添加要素到图层")

    # Drop the reference so OGR releases the underlying feature handle.
    feature = None
    

def sliding_window(image, window_size: Tuple[int, int], overlap: float, batch_size=None) -> List[Tuple]:
    """
    Split a large image into overlapping windows (supports NumPy arrays
    and PyTorch tensors).

    Args:
        image: input image, either a NumPy array ([H, W] / [H, W, C]) or a
            PyTorch tensor ([C, H, W] or [H, W, C]; the layout is guessed
            by assuming the channel axis is the smallest dimension).
        window_size: window size. NOTE(review): the docstring historically
            said (width, height) but the code unpacks it as
            (height, width); square windows are unaffected -- confirm the
            intended order with callers.
        overlap: fractional overlap between neighbouring windows in [0, 1).
            Values >= 1 are clamped so the stride stays at least 1 pixel.
        batch_size: accepted for API compatibility; currently unused.

    Returns:
        List[Tuple]: (window, (x, y, width, height)) pairs; windows at the
        right/bottom edges may be smaller than window_size.

    Raises:
        TypeError: if image is neither a NumPy array nor a PyTorch tensor.
    """
    import torch

    # Determine image height/width for either container type.
    if isinstance(image, np.ndarray):
        h, w = image.shape[:2]
    elif isinstance(image, torch.Tensor):
        if image.ndim == 3:
            if image.shape[0] < min(image.shape[1], image.shape[2]):
                # Channel-first layout [C, H, W].
                h, w = image.shape[1], image.shape[2]
            else:
                # Channel-last layout [H, W, C].
                h, w = image.shape[0], image.shape[1]
        else:
            h, w = image.shape[:2]
    else:
        raise TypeError("image must be a numpy array or PyTorch tensor")

    window_h, window_w = window_size

    # Stride shrinks as overlap grows; clamp to >= 1 so that overlap >= 1
    # cannot produce a zero or negative step (range() would raise
    # ValueError on a zero step).
    stride_h = max(1, int(window_h * (1 - overlap)))
    stride_w = max(1, int(window_w * (1 - overlap)))

    windows = []

    for y in range(0, h, stride_h):
        for x in range(0, w, stride_w):
            # Clip the window to the image bounds.
            x1 = x
            y1 = y
            x2 = min(x + window_w, w)
            y2 = min(y + window_h, h)

            # Slice according to the container type and layout.
            if isinstance(image, np.ndarray):
                window = image[y1:y2, x1:x2]
            elif isinstance(image, torch.Tensor):
                if image.ndim == 3 and image.shape[0] < min(image.shape[1], image.shape[2]):
                    # [C, H, W]: slice the spatial axes only.
                    window = image[:, y1:y2, x1:x2]
                else:
                    # [H, W, C] or [H, W].
                    window = image[y1:y2, x1:x2]

            windows.append((window, (x1, y1, x2 - x1, y2 - y1)))

    return windows


def merge_overlapping_polygons(polygons: List[Tuple[List[Tuple[float, float]], float]], threshold: float = 0.5, use_cuda: bool = None, batch_size: int = None) -> List[Tuple[List[Tuple[float, float]], float]]:
    """
    Merge overlapping polygons (batched, with optional GPU support).

    Args:
        polygons: list of (vertex list, area) pairs.
        threshold: bounding-box overlap ratio above which two polygons are
            considered duplicates; the later one is dropped.
        use_cuda: whether to use CUDA; None reads the project config and
            additionally requires torch.cuda.is_available().
        batch_size: number of polygons per batch; None reads the config.
            The result is independent of the batch size (the inner loop is
            sequential); batching only bounds per-step bookkeeping.

    Returns:
        The deduplicated polygon list; on any unexpected error the input
        list is returned unchanged.
    """
    # Import at the top of the function: the original placed `import torch`
    # after the first use of `torch`, which made the name function-local and
    # raised UnboundLocalError whenever use_cuda was None.
    import torch
    import numpy as np

    # Resolve defaults from the project configuration.
    if use_cuda is None:
        use_cuda = PERFORMANCE_OPTIMIZATION['gpu_acceleration']['enabled'] and torch.cuda.is_available()

    if batch_size is None:
        batch_size = PERFORMANCE_OPTIMIZATION['batch_processing']['batch_size']

    # Nothing to merge.
    if len(polygons) <= 1:
        return polygons

    try:
        # Track which input polygons have been consumed.
        merged = [False] * len(polygons)
        merged_polygons = []

        # Fix: the original overwrote batch_size with a hard-coded 100
        # here, defeating both the parameter and the config value.
        num_batches = (len(polygons) + batch_size - 1) // batch_size

        # Honour the use_cuda request but never select CUDA when the
        # runtime has no device available.
        device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

        for i in range(num_batches):
            start_idx = i * batch_size
            end_idx = min((i + 1) * batch_size, len(polygons))

            # Skip batches whose polygons were all consumed already.
            if all(merged[start_idx:end_idx]):
                continue

            for j in range(start_idx, end_idx):
                if merged[j]:
                    continue

                current_polygon, current_area = polygons[j]
                current_merged = False

                # Compare against every polygon kept so far; above the
                # threshold the existing polygon is kept and the current one
                # dropped (simplified "merge" -- a real implementation would
                # union the geometries, e.g. with shapely).
                for k in range(len(merged_polygons)):
                    merged_polygon, merged_area = merged_polygons[k]

                    overlap_ratio = _calculate_overlap_ratio(current_polygon, merged_polygon)

                    if overlap_ratio > threshold:
                        merged_polygons[k] = (merged_polygon, merged_area)
                        current_merged = True
                        break

                if not current_merged:
                    # No significant overlap: keep the current polygon.
                    merged_polygons.append((current_polygon, current_area))

                merged[j] = True

        # Illustrative GPU path: for very large inputs the vertex data can
        # be staged on the device for batched overlap computation. This is
        # a framework only -- the tensors are not used yet.
        if len(polygons) > 1000 and device.type == 'cuda':
            polygon_tensors = []
            for polygon, _ in polygons:
                if len(polygon) > 0:
                    poly_tensor = torch.tensor(polygon, dtype=torch.float32, device=device)
                    polygon_tensors.append(poly_tensor)

        return merged_polygons

    except Exception as e:
        print(f"多边形合并过程中出错: {e}")
        # Fall back to the unmerged input on error.
        return polygons


def _calculate_overlap_ratio(polygon1: List[Tuple[float, float]], polygon2: List[Tuple[float, float]]) -> float:
    """
    计算两个多边形的重叠比例（简化实现）
    
    Args:
        polygon1: 第一个多边形的顶点列表
        polygon2: 第二个多边形的顶点列表
        
    Returns:
        float: 重叠比例
    """
    # 注意：这是一个简化的实现，实际项目中应使用专门的几何计算库
    # 如shapely来准确计算多边形重叠
    
    # 计算边界框
    def get_bbox(polygon):
        xs, ys = zip(*polygon)
        return min(xs), min(ys), max(xs), max(ys)
    
    # 获取两个多边形的边界框
    x1_min, y1_min, x1_max, y1_max = get_bbox(polygon1)
    x2_min, y2_min, x2_max, y2_max = get_bbox(polygon2)
    
    # 计算边界框的重叠区域
    overlap_x_min = max(x1_min, x2_min)
    overlap_y_min = max(y1_min, y2_min)
    overlap_x_max = min(x1_max, x2_max)
    overlap_y_max = min(y1_max, y2_max)
    
    # 如果边界框不重叠，返回0
    if overlap_x_min >= overlap_x_max or overlap_y_min >= overlap_y_max:
        return 0.0
    
    # 计算重叠区域面积和两个多边形的面积
    overlap_area = (overlap_x_max - overlap_x_min) * (overlap_y_max - overlap_y_min)
    
    # 注意：这里使用边界框面积作为多边形面积的近似值
    # 在实际项目中，应该使用更准确的多边形面积计算方法
    area1 = (x1_max - x1_min) * (y1_max - y1_min)
    area2 = (x2_max - x2_min) * (y2_max - y2_min)
    
    # 计算重叠比例（取较小面积的比例）
    if min(area1, area2) == 0:
        return 0.0
    
    return overlap_area / min(area1, area2)