import torch
import numpy as np
import os
import gc
from typing import List, Dict, Any, Tuple
import cv2
import time
import torch

# 导入SAM2相关模块
from sam2.build_sam import build_sam2
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

# 导入自定义工具函数
from gdal_utils import (
    read_remote_sensing_image,
    mask_to_polygon,
    create_shapefile,
    add_polygon_to_shapefile,
    sliding_window,
    merge_overlapping_polygons
)

from config import SAM2_CONFIG, SLIDING_WINDOW, RESULT_FILTER, OUTPUT, PERFORMANCE_OPTIMIZATION


class SAM2CropExtractor:
    """SAM2-based extractor of crop-field polygons from remote-sensing imagery.

    The SAM2 model is loaded once in ``__init__`` (device, precision and other
    settings come from ``PERFORMANCE_OPTIMIZATION``). ``extract_crop_fields``
    then segments an image — optionally tiled and/or via a sliding window —
    and writes the resulting polygons to a Shapefile.
    """

    def __init__(self):
        """Initialize the extractor and eagerly load the SAM2 model."""
        self.model = None
        self.mask_generator = None
        self.device = None

        # Performance tuning knobs (GPU, batching, async, memory) from config.
        self.perf_config = PERFORMANCE_OPTIMIZATION

        # Pick a device and load the model right away.
        self._setup_model()

    def _free_memory(self):
        """Collect Python garbage and release cached CUDA memory (if any)."""
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def _setup_model(self):
        """Select the compute device and load the SAM2 model.

        Raises:
            Exception: re-raises whatever ``build_sam2`` or
                ``SAM2AutomaticMaskGenerator`` raise on failure.
        """
        gpu_config = self.perf_config['gpu_acceleration']

        # Device preference: CUDA (when enabled in config) > Apple MPS > CPU.
        if torch.cuda.is_available() and gpu_config['enabled']:
            self.device = torch.device("cuda")

            # Optionally run under bfloat16 autocast. The context manager is
            # entered once and deliberately never exited (process-lifetime
            # autocast, mirroring the official SAM2 examples).
            if gpu_config['use_bfloat16']:
                torch.autocast("cuda", dtype=torch.bfloat16).__enter__()

            # TF32 speeds up matmul/conv on Ampere (compute capability 8.x)
            # and newer GPUs with negligible accuracy impact.
            if (gpu_config['use_tf32'] and
                    torch.cuda.get_device_properties(0).major >= 8):
                torch.backends.cuda.matmul.allow_tf32 = True
                torch.backends.cudnn.allow_tf32 = True
        elif torch.backends.mps.is_available():
            self.device = torch.device("mps")
        else:
            self.device = torch.device("cpu")

        print(f"使用设备: {self.device}")

        # Report the effective GPU acceleration settings.
        if self.device.type == 'cuda':
            print(f"GPU加速配置: bfloat16={gpu_config['use_bfloat16']}, tf32={gpu_config['use_tf32']}")

        print("正在加载SAM2模型...")
        start_time = time.time()

        try:
            self.model = build_sam2(
                SAM2_CONFIG['model_config'],
                SAM2_CONFIG['model_checkpoint'],
                device=self.device
            )

            # Default automatic mask generator (per-window generators are
            # created in _process_window to avoid cross-window state).
            self.mask_generator = SAM2AutomaticMaskGenerator(
                model=self.model,
                **SAM2_CONFIG['automatic_mask_generator']
            )

            print(f"模型加载成功，耗时: {time.time() - start_time:.2f}秒")
        except Exception as e:
            print(f"模型加载失败: {e}")
            raise

    def extract_crop_fields(self, image_path: str, output_path: str = None,
                            use_batch_processing: bool = None,
                            batch_size: int = None,
                            enable_tiling: bool = None) -> str:
        """Extract crop-field polygons from a remote-sensing image.

        Supports GPU batch processing, asynchronous window processing and
        image tiling; every ``None`` argument falls back to the value in the
        configuration file.

        Args:
            image_path: path of the input remote-sensing image.
            output_path: path of the output Shapefile; when ``None`` a default
                is derived from the image name inside the configured output
                directory.
            use_batch_processing: override for batch processing on/off.
            batch_size: override for the batch size.
            enable_tiling: override for tiled reading of very large images.

        Returns:
            str: path of the written Shapefile.
        """
        batch_cfg = self.perf_config['batch_processing']
        mem_cfg = self.perf_config['memory_optimization']

        # Explicit arguments win over the configuration file.
        final_batch_enabled = (use_batch_processing
                               if use_batch_processing is not None
                               else batch_cfg['enabled'])
        final_batch_size = batch_size if batch_size is not None else batch_cfg['batch_size']

        # Read the image; on CUDA-enabled setups the reader may hand back a
        # torch tensor (and, when tiling kicks in, a list of tiles).
        print(f"正在读取遥感影像: {image_path}")
        gpu_ok = self.perf_config['gpu_acceleration']['enabled'] and torch.cuda.is_available()
        image_data, geo_info = read_remote_sensing_image(
            image_path,
            use_cuda=gpu_ok,
            return_tensor=gpu_ok,
            enable_tiling=enable_tiling
        )

        # Derive a default output path from the image file name.
        if output_path is None:
            base_name = os.path.splitext(os.path.basename(image_path))[0]
            output_path = os.path.join(OUTPUT['output_dir'], f"{base_name}_crop_fields.shp")

        print(f"正在创建输出文件: {output_path}")
        data_source, layer = create_shapefile(
            output_path,
            geo_info['projection'],
            OUTPUT['crs']
        )

        print("开始提取耕地地块...")
        start_time = time.time()

        all_polygons = []

        if geo_info.get('is_tiled', False):
            # --- Tiled input: process each tile independently. ---
            print(f"处理分幅数据，共{geo_info['num_tiles']}个分幅")

            for i, (tile_data, tile_geo_transform) in enumerate(image_data):
                print(f"处理分幅 {i+1}/{geo_info['num_tiles']}")

                # Slide a window over tiles larger than the window size.
                needs_window = SLIDING_WINDOW['enabled'] and (
                    tile_data.shape[0] > SLIDING_WINDOW['window_size'][1] or
                    tile_data.shape[1] > SLIDING_WINDOW['window_size'][0]
                )
                if needs_window:
                    windows = sliding_window(
                        tile_data,
                        SLIDING_WINDOW['window_size'],
                        SLIDING_WINDOW['overlap'],
                        batch_size=final_batch_size
                    )
                    total_windows = len(windows)
                    print(f"  分幅内生成{total_windows}个窗口")

                    # GPU batching only pays off above a minimum window count.
                    use_batch = (final_batch_enabled and self.device.type == 'cuda' and
                                 total_windows >= batch_cfg['min_windows_for_batch'])
                    if use_batch:
                        print(f"  使用GPU批处理并行处理，批大小: {final_batch_size}")
                        tile_polygons = self._process_windows_in_batches(
                            windows,
                            tile_geo_transform,
                            final_batch_size
                        )
                    else:
                        tile_polygons = []
                        for j, (window, (x, y, w, h)) in enumerate(windows):
                            tile_polygons.extend(
                                self._process_window(window, (x, y), tile_geo_transform)
                            )
                            # Periodically release memory across many windows.
                            if (mem_cfg['enabled'] and
                                    (j + 1) % mem_cfg['garbage_collection_interval'] == 0):
                                self._free_memory()
                else:
                    # Tile is small enough to segment in one pass.
                    tile_polygons = self._process_window(tile_data, (0, 0), tile_geo_transform)

                all_polygons.extend(tile_polygons)

                # Clean up between tiles.
                if mem_cfg['enabled']:
                    self._free_memory()
        else:
            # --- Single image: sliding window when large, direct otherwise. ---
            needs_window = SLIDING_WINDOW['enabled'] and (
                image_data.shape[0] > SLIDING_WINDOW['window_size'][1] or
                image_data.shape[1] > SLIDING_WINDOW['window_size'][0]
            )
            if needs_window:
                print(f"图像尺寸较大，使用滑窗处理: {image_data.shape[1]}x{image_data.shape[0]}")
                windows = sliding_window(
                    image_data,
                    SLIDING_WINDOW['window_size'],
                    SLIDING_WINDOW['overlap'],
                    batch_size=final_batch_size
                )
                total_windows = len(windows)
                print(f"共生成{total_windows}个窗口")

                use_batch = (final_batch_enabled and self.device.type == 'cuda' and
                             total_windows >= batch_cfg['min_windows_for_batch'])
                if use_batch:
                    print(f"使用GPU批处理并行处理，批大小: {final_batch_size}")
                    all_polygons = self._process_windows_in_batches(
                        windows,
                        geo_info['geo_transform'],
                        final_batch_size
                    )
                else:
                    for i, (window, (x, y, w, h)) in enumerate(windows):
                        print(f"处理窗口 {i+1}/{total_windows}: 位置=({x}, {y}), 大小=({w}x{h})")
                        all_polygons.extend(
                            self._process_window(window, (x, y), geo_info['geo_transform'])
                        )
                        if (mem_cfg['enabled'] and
                                (i + 1) % mem_cfg['garbage_collection_interval'] == 0):
                            self._free_memory()
            else:
                print(f"直接处理整幅图像: {image_data.shape[1]}x{image_data.shape[0]}")
                all_polygons = self._process_window(image_data, (0, 0), geo_info['geo_transform'])

        # Merge polygons that overlap across window/tile borders.
        if len(all_polygons) > 0:
            print(f"共提取{len(all_polygons)}个地块，开始合并重叠区域...")
            merged_polygons = merge_overlapping_polygons(
                all_polygons,
                RESULT_FILTER['merge_overlap_threshold'],
                use_cuda=gpu_ok,
                batch_size=final_batch_size
            )
            print(f"合并后剩余{len(merged_polygons)}个地块")

            print("正在将地块写入Shapefile...")
            for i, (polygon_points, area) in enumerate(merged_polygons):
                # Keep only polygons inside the configured area range.
                if RESULT_FILTER['min_area'] <= area <= RESULT_FILTER['max_area']:
                    try:
                        # Confidence defaults to 1.0 (no per-mask score kept).
                        add_polygon_to_shapefile(layer, polygon_points, area, 1.0)
                    except Exception as e:
                        print(f"添加地块{i+1}失败: {e}")
                        continue

        # Dropping the reference flushes and closes the OGR data source.
        data_source = None

        print(f"地块提取完成，耗时: {time.time() - start_time:.2f}秒")
        print(f"结果已保存至: {output_path}")

        return output_path

    def _process_windows_in_batches(self, windows: List[Tuple[np.ndarray, Tuple[int, int, int, int]]],
                                    geo_transform: Tuple[float, float, float, float, float, float],
                                    batch_size: int = 8) -> List[Tuple[List[Tuple[float, float]], float]]:
        """Process sliding-window chunks in batches, optionally asynchronously.

        When async processing is enabled in the config, windows of each batch
        are dispatched to a thread pool and awaited together; otherwise the
        batch is processed sequentially.

        Args:
            windows: list of (window image, (x, y, w, h)) tuples.
            geo_transform: GDAL-style affine geo-transform of the parent image.
            batch_size: number of windows per batch.

        Returns:
            Concatenated polygon list from all windows.
        """
        import asyncio
        from concurrent.futures import ThreadPoolExecutor

        async_config = self.perf_config['async_processing']

        # Size the worker pool; os.cpu_count() may return None, so guard it.
        max_workers = async_config['max_workers']
        if max_workers is None:
            max_workers = min(8, (os.cpu_count() or 4) + 4)

        all_polygons = []
        total_windows = len(windows)
        num_batches = (total_windows + batch_size - 1) // batch_size

        if async_config['enabled']:
            # Thread pool + private event loop for asynchronous fan-out.
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)

                try:
                    async def process_window_async(window, window_offset):
                        # Run the (blocking) window segmentation on the pool.
                        return await loop.run_in_executor(
                            executor,
                            self._process_window,
                            window,
                            window_offset,
                            geo_transform
                        )

                    for batch_idx in range(num_batches):
                        start_idx = batch_idx * batch_size
                        end_idx = min((batch_idx + 1) * batch_size, total_windows)

                        # Fan out the current batch and await all results.
                        tasks = [
                            process_window_async(window, (x, y))
                            for window, (x, y, w, h) in windows[start_idx:end_idx]
                        ]
                        batch_results = loop.run_until_complete(asyncio.gather(*tasks))

                        for result in batch_results:
                            if result:
                                all_polygons.extend(result)

                        print(f"处理批次 {batch_idx + 1}/{num_batches}，完成窗口 {min(end_idx, total_windows)}/{total_windows}")

                        if self.perf_config['memory_optimization']['enabled']:
                            self._free_memory()
                finally:
                    # Make sure the private event loop is always closed.
                    loop.close()
        else:
            # Plain sequential processing, still batched for progress/GC.
            for batch_idx in range(num_batches):
                start_idx = batch_idx * batch_size
                end_idx = min((batch_idx + 1) * batch_size, total_windows)

                for i, (window, (x, y, w, h)) in enumerate(windows[start_idx:end_idx]):
                    global_idx = start_idx + i
                    print(f"处理窗口 {global_idx + 1}/{total_windows}: 位置=({x}, {y}), 大小=({w}x{h})")

                    all_polygons.extend(
                        self._process_window(window, (x, y), geo_transform)
                    )

                print(f"处理批次 {batch_idx + 1}/{num_batches}，完成窗口 {min(end_idx, total_windows)}/{total_windows}")

                if self.perf_config['memory_optimization']['enabled']:
                    self._free_memory()

        return all_polygons

    def _process_window(self, window, window_offset: Tuple[int, int],
                        geo_transform: Tuple[float, float, float, float, float, float]) -> List[Tuple[List[Tuple[float, float]], float]]:
        """Segment a single window and convert its masks to geo-polygons.

        Args:
            window: window image data (NumPy array or PyTorch tensor).
            window_offset: (x, y) pixel offset of the window in the parent
                image.
            geo_transform: GDAL-style affine geo-transform of the parent image.

        Returns:
            List of (polygon point list, area) tuples; empty on any failure.
        """
        polygons = []

        try:
            # Shift the geo-transform origin to the window's top-left pixel.
            # Both rotation terms (indices 2 and 4) are included so that
            # rotated/sheared rasters are georeferenced correctly too; for
            # north-up images (gt[2] == gt[4] == 0) this matches the simple
            # offset.
            window_geo_transform = list(geo_transform)
            window_geo_transform[0] += (window_offset[0] * geo_transform[1] +
                                        window_offset[1] * geo_transform[2])
            window_geo_transform[3] += (window_offset[0] * geo_transform[4] +
                                        window_offset[1] * geo_transform[5])

            # Accept both torch tensors and numpy arrays; the mask generator
            # needs a numpy array, so tensors are copied back to host memory.
            if isinstance(window, torch.Tensor):
                if window.device != self.device:
                    window = window.to(self.device)
                window_np = window.detach().cpu().numpy() if window.device.type == 'cuda' else window.numpy()
            else:
                window_np = window

            # Linearly stretch non-uint8 data to the 0-255 range.
            if window_np.dtype != np.uint8:
                min_val = window_np.min()
                max_val = window_np.max()
                if max_val > min_val:
                    window_np = ((window_np - min_val) / (max_val - min_val) * 255).astype(np.uint8)
                else:
                    window_np = np.zeros_like(window_np, dtype=np.uint8)

            masks = []
            try:
                # SAM2 expects (H, W, 3); replicate single-band data and drop
                # any bands beyond the first three.
                if len(window_np.shape) == 2:
                    window_np = np.stack([window_np] * 3, axis=-1)
                elif len(window_np.shape) == 3 and window_np.shape[-1] > 3:
                    window_np = window_np[..., :3]

                window_np = np.clip(window_np, 0, 255).astype(np.uint8)

                # A fresh generator per window avoids cross-window state
                # pollution ('An image must be set with .set_image(...)').
                window_mask_generator = SAM2AutomaticMaskGenerator(
                    model=self.model,
                    **SAM2_CONFIG['automatic_mask_generator']
                )

                masks_result = window_mask_generator.generate(window_np)

                if masks_result is not None and isinstance(masks_result, list):
                    masks = masks_result
                else:
                    print(f"警告: SAM2返回了无效的掩码结果类型: {type(masks_result)}")
                    masks = []
            except Exception as e:
                print(f"生成掩码失败: {str(e)}")
                if "CUDA out of memory" in str(e):
                    # Free GPU memory and skip this window.
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                    print("尝试释放GPU内存并跳过当前窗口...")
                elif "An image must be set with .set_image" in str(e):
                    # Fallback: drive SAM2ImagePredictor directly with a
                    # center-point prompt. (Bug fix: the masks produced here
                    # used to be discarded by an unconditional `masks = []`
                    # after this block; they are now actually used.)
                    try:
                        from sam2.sam2_image_predictor import SAM2ImagePredictor
                        predictor = SAM2ImagePredictor(self.model)
                        predictor.set_image(window_np)
                        h, w = window_np.shape[:2]
                        points = np.array([[w // 2, h // 2]])
                        point_labels = np.array([1])
                        masks, scores, logits = predictor.predict(
                            point_coords=points,
                            point_labels=point_labels,
                            multimask_output=True
                        )
                        # Re-shape to the automatic generator's output format.
                        masks = [{'segmentation': masks[i]} for i in range(len(masks))]
                    except Exception as alt_e:
                        print(f"备选方案也失败: {str(alt_e)}")
                        masks = []

            # Vectorize every valid mask.
            for mask_info in masks:
                if mask_info is None or not isinstance(mask_info, dict) or 'segmentation' not in mask_info:
                    print("警告: 跳过无效的掩码信息")
                    continue

                mask = mask_info['segmentation']

                if not isinstance(mask, np.ndarray) or mask.ndim != 2:
                    print(f"警告: 跳过无效的掩码类型: {type(mask)}, 维度: {getattr(mask, 'ndim', '未知')}")
                    continue

                # Skip entirely empty masks.
                if not np.any(mask):
                    continue

                try:
                    right_angle_config = RESULT_FILTER.get('right_angle_processing', {})

                    # Mask -> polygon(s), with simplification and optional
                    # right-angle regularization.
                    mask_polygons = mask_to_polygon(
                        mask,
                        window_geo_transform,
                        RESULT_FILTER['simplify_tolerance'],
                        right_angle_config.get('enabled', False),
                        right_angle_config.get('angle_tolerance', 15.0),
                        right_angle_config.get('min_segment_length', 5.0),
                        right_angle_config.get('max_iterations', 3)
                    )

                    if mask_polygons:
                        # Window fragments may be partial polygons, so a
                        # relaxed minimum area (scaled by min_area_ratio)
                        # is applied here; the full filter runs after merge.
                        for polygon, area in mask_polygons:
                            if area >= RESULT_FILTER['min_area'] * SLIDING_WINDOW.get('min_area_ratio', 0.5):
                                polygons.append((polygon, area))
                except Exception as e:
                    print(f"转换掩码为多边形时出错: {str(e)}")
                    continue
        except Exception as e:
            print(f"处理窗口失败: {e}")

        return polygons


# Module-level singleton so callers can use the convenience function below
# without constructing their own extractor. NOTE: this loads the SAM2 model
# at import time, which is slow and requires the checkpoint to be present.
crop_extractor = SAM2CropExtractor()


# 提供直接调用的函数

def extract_crop_fields_from_image(image_path: str, output_path: str = None, use_batch_processing: bool = None, batch_size: int = None, enable_tiling: bool = None) -> str:
    """Convenience wrapper around the module-level ``SAM2CropExtractor``.

    Delegates to ``crop_extractor.extract_crop_fields``; every argument left
    as ``None`` falls back to the value in the configuration file. Tiled
    processing of very large images is supported via ``enable_tiling``.

    Args:
        image_path: path of the remote-sensing image to process.
        output_path: optional path of the output Shapefile.
        use_batch_processing: optional batch-processing override.
        batch_size: optional batch-size override.
        enable_tiling: optional tiling override.

    Returns:
        str: path of the written Shapefile.
    """
    options = {
        'output_path': output_path,
        'use_batch_processing': use_batch_processing,
        'batch_size': batch_size,
        'enable_tiling': enable_tiling,
    }
    return crop_extractor.extract_crop_fields(image_path, **options)