#!/usr/bin/env python
"""
Batch compression tool for satellite models.
"""

import os
import argparse
import json
import torch
import importlib
from tqdm import tqdm
import sys
import logging

# Make the project root importable when this script is run directly.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))

# Project-local satellite model compressor.
from uamcf.applications.satellite import SatelliteModelCompressor

# Module-wide logging configuration.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger("BatchCompressor")


def get_args():
    """Parse and return the command-line arguments for this script."""
    cli = argparse.ArgumentParser(description="卫星模型批量压缩工具")
    # Register each option from a (flags, kwargs) spec table.
    options = [
        (("--config", "-c"), dict(required=True, help="配置文件路径 (JSON)")),
        (("--output-dir", "-o"), dict(default="./output", help="输出目录")),
        (("--device", "-d"), dict(default=None, help="使用的设备 ('cpu' 或 'cuda')")),
    ]
    for flags, kwargs in options:
        cli.add_argument(*flags, **kwargs)
    return cli.parse_args()


def load_model(model_info):
    """Instantiate the model described by ``model_info`` and optionally load weights.

    Args:
        model_info: dict with required keys ``"task_type"`` and
            ``"model_type"``; optional ``"model_path"`` (checkpoint file to
            load) and ``"module_path"`` (dotted module used for dynamic
            lookup of custom model classes).

    Returns:
        The model in eval mode, or ``None`` when construction or weight
        loading fails (the error is logged instead of raised).
    """
    try:
        task_type = model_info["task_type"]
        model_type = model_info["model_type"]
        model_path = model_info.get("model_path", None)

        logger.info(f"加载 {model_type} 模型用于 {task_type}...")

        def _dynamic_model(default_module, error_msg):
            # Shared fallback (previously triplicated): import the configured
            # (or default) module and instantiate the class named by
            # `model_type`; raise ValueError with the caller's message on failure.
            try:
                module_path = model_info.get("module_path", default_module)
                module = importlib.import_module(module_path)
                model_class = getattr(module, model_type)
                return model_class()
            except (ImportError, AttributeError) as e:
                raise ValueError(f"{error_msg}. 错误: {str(e)}")

        if task_type == "classification":
            # Built-in torchvision classifiers; anything else falls back to
            # dynamic import.
            # NOTE(review): "vit_small" is served by torchvision's vit_b_16 —
            # confirm that mapping is intended.
            # NOTE: pretrained=False is deprecated in newer torchvision
            # (use weights=None); kept for compatibility with older versions.
            builtin_classifiers = {
                "resnet18": "resnet18",
                "resnet50": "resnet50",
                "efficientnet_b0": "efficientnet_b0",
                "vit_small": "vit_b_16",
            }
            if model_type in builtin_classifiers:
                import torchvision.models as tv_models
                factory = getattr(tv_models, builtin_classifiers[model_type])
                model = factory(pretrained=False)
            else:
                model = _dynamic_model(
                    "models.classification",
                    f"不支持的分类模型: {model_type}")

        elif task_type == "detection":
            if model_type == "yolov5s":
                model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=False)
            else:
                model = _dynamic_model(
                    "models.detection",
                    f"不支持的检测模型: {model_type}")

        else:
            # Any other task type relies entirely on dynamic import.
            model = _dynamic_model(
                f"models.{task_type}",
                f"不支持的模型: {model_type} 用于任务 {task_type}")

        # Load checkpoint weights when a path is provided and exists.
        if model_path and os.path.exists(model_path):
            # NOTE(review): torch.load unpickles arbitrary objects — only load
            # checkpoints from trusted sources.
            state_dict = torch.load(model_path, map_location='cpu')
            # Some checkpoints wrap the weights under a 'state_dict' key.
            if isinstance(state_dict, dict) and 'state_dict' in state_dict:
                state_dict = state_dict['state_dict']
            model.load_state_dict(state_dict, strict=False)
            logger.info(f"从 {model_path} 加载模型权重")

        model.eval()
        return model

    except Exception as e:
        logger.error(f"加载模型时出错: {str(e)}")
        return None


def create_dataloader(data_info):
    """Build a DataLoader for the task described by ``data_info``.

    Args:
        data_info: dict with ``"task_type"`` and ``"data_dir"``; optional
            ``"batch_size"`` (default 8).

    Returns:
        A DataLoader from the task-specific ``dataloaders`` module, or a
        DataLoader over random dummy samples when that fails (the error is
        logged and the script keeps running).
    """
    try:
        task_type = data_info["task_type"]
        data_dir = data_info["data_dir"]
        batch_size = data_info.get("batch_size", 8)

        logger.info(f"为 {task_type} 任务创建数据加载器...")

        # One dataloader module per supported task type.
        loader_modules = {
            "classification": "dataloaders.classification",
            "detection": "dataloaders.detection",
            "segmentation": "dataloaders.segmentation",
            "change_detection": "dataloaders.change_detection",
            "cloud_removal": "dataloaders.cloud",
            "super_resolution": "dataloaders.super_resolution",
        }
        if task_type not in loader_modules:
            raise ValueError(f"不支持的任务类型: {task_type}")

        # Dynamic import also avoids shadowing this function's own name
        # with the imported create_dataloader.
        loader_factory = getattr(
            importlib.import_module(loader_modules[task_type]),
            "create_dataloader")
        return loader_factory(data_dir, batch_size=batch_size)

    except Exception as e:
        logger.error(f"创建数据加载器时出错: {str(e)}")
        logger.info("使用演示数据集进行演示。")

        # BUGFIX: when a required key was missing above, task_type/batch_size
        # were never bound and the fallback below crashed with NameError.
        # Re-read them here with safe defaults so the dummy dataset always works.
        task_type = data_info.get("task_type", "classification")
        batch_size = data_info.get("batch_size", 8)

        # Fall back to a dummy dataset so the compression pipeline can run.
        from torch.utils.data import DataLoader, Dataset

        class DummyDataset(Dataset):
            """Random-tensor dataset shaped to mimic each task's samples."""

            def __init__(self, size=100, task_type="classification"):
                self.size = size
                self.task_type = task_type

            def __len__(self):
                return self.size

            def __getitem__(self, idx):
                # Emit a randomly generated sample in the format the task expects.
                if self.task_type == "classification":
                    return torch.rand(3, 224, 224), torch.randint(0, 10, (1,)).item()
                elif self.task_type == "detection":
                    # Image plus target boxes/labels.
                    img = torch.rand(3, 640, 640)
                    target = {"boxes": torch.tensor([[100, 100, 200, 200]]),
                              "labels": torch.tensor([1])}
                    return img, target
                elif self.task_type == "segmentation":
                    # Image plus segmentation mask.
                    return torch.rand(3, 512, 512), torch.randint(0, 7, (512, 512))
                elif self.task_type == "change_detection":
                    # Two time-point images plus a change mask.
                    return torch.rand(3, 256, 256), torch.rand(3, 256, 256), torch.randint(0, 2, (256, 256))
                elif self.task_type in ["cloud_removal", "super_resolution"]:
                    # Low-quality / high-quality image pair.
                    return torch.rand(3, 256, 256), torch.rand(3, 256, 256)
                else:
                    return torch.rand(3, 224, 224), torch.rand(3, 224, 224)

        return DataLoader(DummyDataset(task_type=task_type), batch_size=batch_size)


def main():
    """Entry point: read the JSON config, compress every listed model, and
    write a results summary to the output directory."""
    args = get_args()

    # Read the batch-compression configuration.
    with open(args.config, 'r') as f:
        config = json.load(f)

    os.makedirs(args.output_dir, exist_ok=True)

    # Resolve the compute device: an explicit flag wins, otherwise prefer CUDA.
    device = torch.device(args.device or ('cuda' if torch.cuda.is_available() else 'cpu'))
    logger.info(f"使用设备: {device}")

    # Per-model compression outcomes, written to JSON at the end.
    summary = {}

    for entry in tqdm(config["models"], desc="压缩模型中"):
        name = entry["name"]
        logger.info(f"\n处理模型: {name}")

        model = load_model(entry)
        if model is None:
            logger.warning(f"由于加载错误跳过 {name}")
            continue
        model = model.to(device)

        dataloader = create_dataloader(entry)

        compressor = SatelliteModelCompressor(
            task_type=entry["task_type"],
            model_type=entry["model_type"],
            device=device
        )

        try:
            compressed_model, stats = compressor.compress(
                model,
                dataset=dataloader,
                compression_ratio=entry.get("compression_ratio", 0.5),
                quality_threshold=entry.get("quality_threshold", 0.9),
                method=entry.get("method", None),
                custom_config=entry.get("custom_config", None)
            )

            # Persist the compressed checkpoint.
            out_path = os.path.join(args.output_dir, f"{name}_compressed.pth")
            torch.save(compressed_model, out_path)
            logger.info(f"压缩模型保存到: {out_path}")

            mb = 1024 * 1024
            summary[name] = {
                "original_size_mb": stats.get("original_size", 0) / mb,
                "compressed_size_mb": stats.get("compressed_size", 0) / mb,
                "compression_ratio": stats.get("size_reduction", 1.0),
                "performance_change": stats.get("accuracy_change", 0.0) * 100,  # as percent
                "latency_improvement": stats.get("latency_improvement", 1.0)
            }

            # Report whichever statistics the compressor provided.
            logger.info("\n压缩统计信息:")
            if 'size_reduction' in stats:
                logger.info(f"  大小减少: {stats['size_reduction']:.2f}倍")
            if 'accuracy_change' in stats:
                logger.info(f"  性能变化: {stats['accuracy_change'] * 100:+.2f}%")
            if 'latency_improvement' in stats:
                logger.info(f"  延迟改进: {stats['latency_improvement']:.2f}倍")

        except Exception as e:
            logger.error(f"压缩模型 {name} 时出错: {str(e)}")
            import traceback
            traceback.print_exc()
            summary[name] = {"error": str(e)}

    # Save the overall compression summary.
    results_path = os.path.join(args.output_dir, "compression_results.json")
    with open(results_path, 'w') as f:
        json.dump(summary, f, indent=2)

    logger.info(f"\n压缩结果保存到: {results_path}")
    logger.info("批量压缩完成!")


# Run the batch compression when invoked as a script.
if __name__ == "__main__":
    main()