"""
卫星模型压缩器 - 针对不同卫星任务的模型压缩专用框架
"""

import os
import torch
import numpy as np
import logging
from typing import Dict, Any, Optional, Tuple

# 导入UAMCF核心组件
from uamcf import CompressionManager

# Configure module-wide logging once at import time.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)
logger = logging.getLogger("SatelliteCompressor")


class SatelliteModelCompressor:
    """Satellite model compressor: a framework dedicated to compressing models for different satellite tasks."""

    def __init__(self, task_type: str, model_type: Optional[str] = None, device=None):
        """
        Initialize the satellite model compressor.

        Args:
            task_type: Satellite task type, one of ['classification', 'detection',
                       'segmentation', 'change_detection', 'cloud_removal',
                       'super_resolution'].
            model_type: Model architecture name (e.g. 'resnet50', 'yolov5', 'unet', ...),
                        used to pick architecture-specific compression settings.
            device: Device to use ('cuda' or 'cpu'); auto-detected when None.
                    Anything accepted by ``torch.device`` works here.

        Raises:
            ValueError: If ``task_type`` is not one of the supported task types.
        """
        self.task_type = task_type
        self.model_type = model_type

        # Initialize the underlying (task-agnostic) compression manager.
        self.compression_manager = CompressionManager()

        # Resolve the torch device; default to CUDA when available.
        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        # Map each supported task type to its config-generator method.
        self.task_mapping = {
            'classification': self._get_classification_config,
            'detection': self._get_detection_config,
            'segmentation': self._get_segmentation_config,
            'change_detection': self._get_change_detection_config,
            'cloud_removal': self._get_cloud_removal_config,
            'super_resolution': self._get_super_resolution_config
        }

        # Reject unsupported task types early, before any compression is attempted.
        if task_type not in self.task_mapping:
            raise ValueError(f"不支持的任务类型: {task_type}。支持的类型: {list(self.task_mapping.keys())}")

        logger.info(f"初始化卫星模型压缩器 - 任务类型: {task_type}, 模型类型: {model_type}")

    def compress(self, model, dataset=None, compression_ratio: float = 0.5,
                 quality_threshold: float = 0.9, method: Optional[str] = None,
                 custom_config: Optional[Dict[str, Any]] = None):
        """
        Compress a model.

        Args:
            model: The model to compress (moved to ``self.device`` first).
            dataset: Evaluation dataset forwarded to the compression manager.
            compression_ratio: Target compression ratio in (0, 1); e.g. 0.5 means
                compress to half of the original size.
            quality_threshold: Minimum acceptable performance relative to the
                original model, in (0, 1).
            method: Specific compression method to use; auto-selected when None.
            custom_config: Custom compression configuration whose entries override
                the task-generated defaults.

        Returns:
            Tuple of (compressed model, compression statistics mapping).
        """
        # Make sure the model lives on the configured device.
        model = model.to(self.device)

        # Build the task-specific compression configuration.
        config_generator = self.task_mapping[self.task_type]
        compression_config = config_generator(model, self.model_type,
                                              compression_ratio, quality_threshold, method)

        # Apply the custom configuration overrides, if any.
        if custom_config:
            compression_config = self._merge_configs(compression_config, custom_config)

        # Log the settings before starting compression.
        logger.info(f"开始压缩模型 - 目标压缩率: {compression_ratio}, 质量阈值: {quality_threshold}")
        logger.info(f"使用配置: {compression_config}")

        # Delegate to the compression manager using its expected interface.
        compressed_model, stats = self.compression_manager.compress(
            model,
            constraints=compression_config["constraints"],  # pass the constraints section
            adapter_name="pytorch",  # select the PyTorch adapter
            eval_data=dataset  # pass the evaluation data
        )

        # Log the compression outcome; both keys are optional in the stats mapping.
        if 'size_reduction' in stats:
            logger.info(f"压缩完成 - 大小减少: {stats['size_reduction']:.2f}倍")
        if 'accuracy_change' in stats:
            logger.info(f"性能变化: {stats['accuracy_change'] * 100:+.2f}%")

        return compressed_model, stats

    def _merge_configs(self, base_config: Dict[str, Any], custom_config: Dict[str, Any]) -> Dict[str, Any]:
        """Merge two configs; entries from ``custom_config`` take precedence. The base config is deep-copied, never mutated."""
        import copy
        merged = copy.deepcopy(base_config)

        # Merge the constraints section key by key.
        if 'constraints' in custom_config:
            for key, value in custom_config['constraints'].items():
                merged['constraints'][key] = value

        # Merge the methods section; options of an existing method are merged
        # shallowly, unknown methods are added wholesale.
        if 'methods' in custom_config:
            for method, config in custom_config['methods'].items():
                if method in merged['methods']:
                    for key, value in config.items():
                        merged['methods'][method][key] = value
                else:
                    merged['methods'][method] = config

        return merged

    def _get_classification_config(self, model, model_type: Optional[str], compression_ratio: float,
                                   quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for remote-sensing image classification."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio
            },
            "methods": {}
        }

        # Tailor the configuration to the model architecture.
        if method is None:
            # Auto-select compression methods.
            if model_type and 'resnet' in model_type.lower():
                # ResNet-specific configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.5,
                    "method": "magnitude",
                    "granularity": "channel"
                }
            elif model_type and ('efficient' in model_type.lower() or 'mobilenet' in model_type.lower()):
                # Lightweight/efficient network configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
            elif model_type and (
                    'vit' in model_type.lower() or 'transformer' in model_type.lower() or 'swin' in model_type.lower()):
                # Transformer configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "symmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,  # Transformers usually do not need very aggressive pruning
                    "method": "magnitude",
                    "granularity": "vector"
                }
            else:
                # Default configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.4,
                    "method": "magnitude",
                    "granularity": "element"
                }
        else:
            # Use the explicitly requested method.
            # NOTE(review): unknown method names leave "methods" empty — presumably
            # the manager then applies no compression; confirm against CompressionManager.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.7, 1.0 - compression_ratio),  # prune rate derived from the target compression ratio
                    "method": "magnitude",
                    "granularity": "channel" if model_type and 'resnet' in model_type.lower() else "element"
                }
            elif method == "distillation":
                config["methods"]["distillation"] = {
                    "temperature": 4.0,
                    "alpha": 0.5
                }
            elif method == "factorization":
                config["methods"]["factorization"] = {
                    "rank_ratio": compression_ratio,
                    "method": "svd"
                }

        return config

    def _get_detection_config(self, model, model_type: Optional[str], compression_ratio: float,
                              quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for object detection."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio
            },
            "methods": {}
        }

        # Detection-model-specific configuration.
        if method is None:
            # Auto-select compression methods.
            if model_type and 'yolo' in model_type.lower():
                # YOLO family configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
                # YOLO is sensitive to pruning, especially for small-object detection.
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel",
                    "layer_sensitivity": True  # account for per-layer sensitivity
                }
            elif model_type and ('rcnn' in model_type.lower() or 'faster' in model_type.lower()):
                # R-CNN family configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                # The backbone can tolerate more aggressive pruning.
                config["methods"]["pruning"] = {
                    "sparsity": 0.4,
                    "method": "magnitude",
                    "granularity": "channel",
                    "preserve_detection_head": True  # keep the detection head unpruned
                }
            elif model_type and ('efficient' in model_type.lower() or 'retinanet' in model_type.lower()):
                # EfficientDet / RetinaNet configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.35,
                    "method": "magnitude",
                    "granularity": "channel"
                }
            else:
                # Default object-detection configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel"
                }
        else:
            # Use the explicitly requested method.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.5, 1.0 - compression_ratio),  # detection models usually need more conservative pruning
                    "method": "magnitude",
                    "granularity": "channel",
                    "preserve_detection_head": True
                }

        return config

    def _get_segmentation_config(self, model, model_type: Optional[str], compression_ratio: float,
                                 quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for image segmentation."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio,
                "apply_to_all_layers": True,  # make sure compression is applied to every layer
                "force_compression": True,  # force compression even if performance drops
                "aggressive_compression": True  # use a more aggressive compression strategy
            },
            "methods": {}
        }

        # Segmentation-model-specific configuration.
        if method is None:
            # Auto-select methods — combine multiple techniques.
            config["methods"]["quantization"] = {
                "bits": 4,  # 4-bit quantization instead of 8-bit
                "scheme": "asymmetric",
                "per_channel": True,
                "include_layers": [".*conv.*", ".*bn.*", ".*linear.*", ".*fc.*"],  # include all conv and fully-connected layers
                "include_all_layers": True  # try to quantize every quantizable layer
            }

            # Add a pruning method on top.
            config["methods"]["pruning"] = {
                "sparsity": 0.6,  # high sparsity
                "method": "magnitude",
                "granularity": "channel",  # channel-level pruning
                "min_channels": 2,  # minimum number of channels to keep per layer
                "include_layers": [".*conv.*", ".*linear.*", ".*fc.*", ".*backbone.*"],
                "include_all_layers": True,  # try to prune every prunable layer
                "exclude_layers": [".*classifier.*4"]  # exclude the final classifier layer to preserve the output shape
            }

            # Add architecture-specific tweaks.
            if model_type and 'unet' in model_type.lower():
                # U-Net family configuration.
                config["methods"]["quantization"]["bits"] = 4
                config["methods"]["quantization"]["skip_connections"] = "preserve"  # keep skip connections intact

            elif model_type and 'deeplab' in model_type.lower():
                # DeepLab family configuration.
                config["methods"]["quantization"]["bits"] = 4
                config["methods"]["pruning"]["sparsity"] = 0.5  # slightly more conservative
                config["methods"]["pruning"]["atrous_layers"] = "preserve"  # keep atrous (dilated) convolution layers

            elif model_type and ('segformer' in model_type.lower() or 'transformer' in model_type.lower()):
                # Transformer-based segmentation model configuration.
                config["methods"]["quantization"]["scheme"] = "symmetric"
                config["methods"]["pruning"]["sparsity"] = 0.4  # Transformers require cautious pruning
                config["methods"]["pruning"]["granularity"] = "vector"

        else:
            # Use the explicitly requested method.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 4,  # 4-bit quantization instead of 8-bit
                    "scheme": "asymmetric",
                    "per_channel": True,
                    "include_all_layers": True
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.8, 1.0 - compression_ratio),  # prune rate auto-derived from the compression ratio
                    "method": "magnitude",
                    "granularity": "channel",
                    "skip_connections": "preserve",
                    "include_all_layers": True,
                    "exclude_layers": [".*classifier.*4"]  # exclude the final classifier layer
                }
            elif method == "distillation":
                config["methods"]["distillation"] = {
                    "temperature": 4.0,
                    "alpha": 0.5,
                    "feature_maps": True  # feature-map distillation matters a lot for segmentation
                }

            # Add an auxiliary compression method alongside the requested one.
            if method == "quantization":
                # If the primary method is quantization, add light pruning.
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,  # light pruning
                    "method": "magnitude",
                    "granularity": "channel"
                }
            elif method == "pruning":
                # If the primary method is pruning, add light quantization.
                config["methods"]["quantization"] = {
                    "bits": 6,  # light quantization
                    "scheme": "asymmetric"
                }

        return config

    def _get_change_detection_config(self, model, model_type: Optional[str], compression_ratio: float,
                                     quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for change detection."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio
            },
            "methods": {}
        }

        # Change-detection-model-specific configuration.
        if method is None:
            # Change-detection models are structure-sensitive; use conservative settings.
            if model_type and 'siamese' in model_type.lower():
                # Siamese-network configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel",
                    "dual_branches": "symmetric"  # prune both branches symmetrically
                }
            elif model_type and ('stanet' in model_type.lower() or 'attention' in model_type.lower()):
                # Attention-based change-detection models.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel",
                    "preserve_attention": True  # keep the attention mechanism intact
                }
            else:
                # Default change-detection configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.25,  # change detection generally needs more conservative pruning
                    "method": "magnitude",
                    "granularity": "channel"
                }
        else:
            # Use the explicitly requested method.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.4, 1.0 - compression_ratio),  # change-detection models need more conservative pruning
                    "method": "magnitude",
                    "granularity": "channel"
                }

        return config

    def _get_cloud_removal_config(self, model, model_type: Optional[str], compression_ratio: float,
                                  quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for cloud detection and cloud removal."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio
            },
            "methods": {}
        }

        # Cloud-detection/removal-model-specific configuration.
        if method is None:
            if model_type and 'cloudnet' in model_type.lower():
                # CloudNet configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.4,
                    "method": "magnitude",
                    "granularity": "channel"
                }
            elif model_type and (
                    'gan' in model_type.lower() or 'pix2pix' in model_type.lower() or 'cycle' in model_type.lower()):
                # GAN family configuration — generator and discriminator need separate treatment.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel",
                    "generator_only": True  # prune only the generator; keep the discriminator intact
                }
            else:
                # Default cloud-detection configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.35,
                    "method": "magnitude",
                    "granularity": "channel"
                }
        else:
            # Use the explicitly requested method.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.5, 1.0 - compression_ratio),
                    "method": "magnitude",
                    "granularity": "channel"
                }

        return config

    def _get_super_resolution_config(self, model, model_type: Optional[str], compression_ratio: float,
                                     quality_threshold: float, method: Optional[str]) -> Dict[str, Any]:
        """Build the compression configuration for image super-resolution reconstruction."""
        config = {
            "constraints": {
                "accuracy_threshold": quality_threshold,
                "target_size": compression_ratio
            },
            "methods": {}
        }

        # Super-resolution-model-specific configuration.
        if method is None:
            if model_type and ('srcnn' in model_type.lower() or 'fsrcnn' in model_type.lower()):
                # SRCNN family configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.4,
                    "method": "magnitude",
                    "granularity": "channel"
                }
            elif model_type and ('edsr' in model_type.lower() or 'rcan' in model_type.lower()):
                # EDSR / RCAN configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.35,
                    "method": "magnitude",
                    "granularity": "channel",
                    "residual_blocks": "adaptive"  # handle residual blocks adaptively
                }
            elif model_type and ('swin' in model_type.lower() or 'transformer' in model_type.lower()):
                # SwinIR configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "symmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.25,  # Transformer architectures need more conservative pruning
                    "method": "magnitude",
                    "granularity": "vector"
                }
            else:
                # Default super-resolution configuration.
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric"
                }
                config["methods"]["pruning"] = {
                    "sparsity": 0.3,
                    "method": "magnitude",
                    "granularity": "channel"
                }
        else:
            # Use the explicitly requested method.
            if method == "quantization":
                config["methods"]["quantization"] = {
                    "bits": 8,
                    "scheme": "asymmetric",
                    "per_channel": True
                }
            elif method == "pruning":
                config["methods"]["pruning"] = {
                    "sparsity": min(0.5, 1.0 - compression_ratio),
                    "method": "magnitude",
                    "granularity": "channel"
                }
            elif method == "factorization":
                config["methods"]["factorization"] = {
                    "rank_ratio": compression_ratio,
                    "method": "svd",
                    "preserve_reconstruction": True  # super-resolution is very sensitive to reconstruction quality
                }

        return config