"""
从 HuggingFace 下载 ONNX 模型并分析参数
支持动态 batch 检测、输入输出信息提取等

"""

import argparse
import json
import hashlib
from pathlib import Path
from typing import Dict, Any, List, Optional
import numpy as np

try:
    from huggingface_hub import hf_hub_download, list_repo_files
    import onnxruntime as ort
    import onnx
except ImportError as e:
    print(f"❌ 缺少依赖: {e}")
    print("请安装: pip install huggingface_hub onnxruntime onnx")
    exit(1)


def compute_sha256(file_path: Path) -> str:
    """Return the SHA-256 hex digest of the file at *file_path*.

    The file is consumed in 4 KiB chunks so arbitrarily large models can
    be hashed without loading them fully into memory.
    """
    digest = hashlib.sha256()
    with open(file_path, "rb") as fh:
        while chunk := fh.read(4096):
            digest.update(chunk)
    return digest.hexdigest()


def download_model(model_name: str, repo_id: str, filename: Optional[str], output_dir: Path) -> Optional[Path]:
    """Download a model file from the HuggingFace Hub without renaming it.

    Args:
        model_name: Human-readable model name (used only in log messages).
        repo_id: HuggingFace repository ID (e.g. "onnx-community/resnet-50-ONNX").
        filename: File name inside the repo; may be None, in which case the
            hub client decides what to fetch.
        output_dir: Local directory the file is downloaded into.

    Returns:
        Path of the downloaded file, or None when the download failed.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Fixed: the message previously printed the literal placeholder
    # "(unknown)" instead of the requested filename.
    print(f"📥 下载模型: {model_name} {repo_id}/{filename}")
    try:
        # Fetch the file from the hub into output_dir.
        model_path = hf_hub_download(
            repo_id=repo_id,
            filename=filename,
            local_dir=output_dir,
        )

        # hf_hub_download returns a str; convert so callers can use Path methods.
        model_path = Path(model_path)
        print(f"✅ 下载完成: {model_name} {model_path}")

        return model_path
    except Exception as e:
        # Best-effort: report the failure and let the caller skip this model.
        print(f"❌ 下载失败: {model_name} {e}")
        return None


def check_dynamic_batch(model_path: Path) -> Dict[str, Any]:
    """Inspect an ONNX model's graph inputs and report whether the leading
    (batch) dimension of any input is dynamic.

    Args:
        model_path: Path to the .onnx file on disk.

    Returns:
        Dict with keys:
          - "supports_dynamic_batch": True if any input's first dim is dynamic
          - "batch_dim_info": per-input details about the first dimension
          - "test_results": always left empty here (reserved field)
          - "error": present only when loading/inspection raised
    """
    result = {
        "supports_dynamic_batch": False,
        "batch_dim_info": {},
        "test_results": {}
    }
    
    try:
        # Inspect the ONNX graph definition directly; no runtime session needed.
        model = onnx.load(str(model_path))
        
        for input_tensor in model.graph.input:
            name = input_tensor.name
            shape = input_tensor.type.tensor_type.shape
            
            if len(shape.dim) > 0:
                # Only the first (batch) dimension of each input is classified.
                batch_dim = shape.dim[0]
                
                if batch_dim.dim_param:
                    # Symbolic dimension (dim_param, e.g. "batch_size") => dynamic.
                    result["supports_dynamic_batch"] = True
                    result["batch_dim_info"][name] = {
                        "type": "dynamic",
                        "dim_param": batch_dim.dim_param,
                        # Prefer the concrete size; fall back to the symbolic name.
                        "full_shape": [d.dim_value if d.dim_value else d.dim_param for d in shape.dim]
                    }
                elif batch_dim.dim_value == 0:
                    # dim_value == 0 is the protobuf default for an unset
                    # dimension, treated here as dynamic as well.
                    result["supports_dynamic_batch"] = True
                    result["batch_dim_info"][name] = {
                        "type": "dynamic",
                        "dim_value": 0,
                        "full_shape": [d.dim_value if d.dim_value else "?" for d in shape.dim]
                    }
                else:
                    # Positive concrete size => fixed batch dimension.
                    result["batch_dim_info"][name] = {
                        "type": "fixed",
                        "dim_value": batch_dim.dim_value,
                        "full_shape": [d.dim_value for d in shape.dim]
                    }
        

    except Exception as e:
        # Report the failure in-band rather than raising to the caller.
        result["error"] = str(e)
    
    return result


def analyze_model(model_path: Path) -> Dict[str, Any]:
    """Analyze an ONNX model file and collect its metadata.

    Gathers file size and SHA-256, opset/IR versions, the set of operator
    types used, input/output tensor specs (via an ONNX Runtime CPU session),
    dynamic-batch support, and a task type guessed from the file name.

    Args:
        model_path: Path to the .onnx file.

    Returns:
        Dict of model metadata. On failure the dict additionally carries
        "error" and "traceback" keys (path/size/sha256 are always present).
    """
    model_info = {
        "model_path": str(model_path),
        "file_size_mb": round(model_path.stat().st_size / (1024 * 1024), 2),
        "sha256": compute_sha256(model_path)
    }
    
    try:
        # Basic graph metadata via the onnx library.
        onnx_model = onnx.load(str(model_path))
        # Only the first opset_import entry is reported; models can declare
        # several (one per domain).
        model_info["opset_version"] = onnx_model.opset_import[0].version if onnx_model.opset_import else None
        model_info["ir_version"] = onnx_model.ir_version
        
        # Collect the distinct operator types used in the graph.
        ops_used = set()
        for node in onnx_model.graph.node:
            ops_used.add(node.op_type)
        model_info["operators"] = sorted(list(ops_used))
        model_info["operator_count"] = len(ops_used)
        
        # Input/output specs via an ONNX Runtime CPU session.
        session = ort.InferenceSession(str(model_path), providers=['CPUExecutionProvider'])
        
        # Input metadata.
        model_info["inputs"] = []
        for input_meta in session.get_inputs():
            input_info = {
                "name": input_meta.name,
                "shape": list(input_meta.shape),
                "type": input_meta.type,
                # Extract the inner name from strings like "tensor(float)".
                "dtype": input_meta.type.split('(')[1].split(')')[0] if '(' in input_meta.type else None
            }
            model_info["inputs"].append(input_info)
        
        # Output metadata.
        model_info["outputs"] = []
        for output_meta in session.get_outputs():
            output_info = {
                "name": output_meta.name,
                "shape": list(output_meta.shape),
                "type": output_meta.type,
                "dtype": output_meta.type.split('(')[1].split(')')[0] if '(' in output_meta.type else None
            }
            model_info["outputs"].append(output_info)
        
        # Dynamic batch support (graph-level inspection).
        dynamic_batch_info = check_dynamic_batch(model_path)
        model_info["dynamic_batch"] = dynamic_batch_info
        
        # Heuristic task inference from the file name only; the graph
        # structure is not actually inspected despite what names suggest.
        model_name_lower = model_path.stem.lower()
        if "bert" in model_name_lower or "transformer" in model_name_lower:
            model_info["inferred_task"] = "text_classification"
        elif "resnet" in model_name_lower or "mobilenet" in model_name_lower or "vit" in model_name_lower:
            model_info["inferred_task"] = "classification"
        else:
            model_info["inferred_task"] = "unknown"
        
    except Exception as e:
        # Keep the partial result and attach the failure details in-band.
        model_info["error"] = str(e)
        import traceback
        model_info["traceback"] = traceback.format_exc()
    
    return model_info


def format_for_registry(model_info: Dict[str, Any], model_name: str, relative_path: str, task: str, area: str) -> Dict[str, Any]:
    """Convert an analyze_model() result into the project registry format.

    Args:
        model_info: Output of analyze_model() for one model.
        model_name: Registry key / display name of the model.
        relative_path: Path to the .onnx file, recorded as "source_onnx".
        task: Task label (e.g. "classification"); selects preprocessing defaults.
        area: Application area label, stored verbatim.

    Returns:
        Registry entry dict with name, source path, task/area labels, opset,
        sha256, per-input specs (shape/dtype/layout/batched) and output names.
    """
    # ONNX Runtime type string -> registry dtype name; anything unrecognized
    # falls back to float32. Hoisted out of the loop (it is loop-invariant).
    dtype_map = {
        "tensor(float)": "float32",
        "tensor(double)": "float64",
        "tensor(int64)": "int64",
        "tensor(int32)": "int32"
    }

    # Whether the model accepts variable batch sizes; same flag for every input.
    batched = model_info.get("dynamic_batch", {}).get("supports_dynamic_batch", False)

    inputs = {}
    for inp in model_info.get("inputs", []):
        shape = inp["shape"]

        # Infer a layout label from the rank: 4-D is assumed image-style NCHW,
        # 2-D is NC, any other rank gets "N" plus "?" placeholders.
        if len(shape) == 4:
            layout = "NCHW"
        elif len(shape) == 2:
            layout = "NC"
        else:
            layout = "N" + "?" * (len(shape) - 1)

        inputs[inp["name"]] = {
            "shape": shape,
            "dtype": dtype_map.get(inp["type"], "float32"),
            "layout": layout,
            "batched": batched
        }

    # Only the output tensor names are kept in the registry.
    outputs = [out["name"] for out in model_info.get("outputs", [])]

    registry_entry = {
        "name": model_name,
        "source_onnx": relative_path,
        "task": task,
        "area": area,
        "opset": model_info.get("opset_version"),
        "sha256": model_info.get("sha256"),
        "inputs": inputs,
        "outputs": outputs
    }

    # Image-classification models get standard ImageNet normalization defaults;
    # every other task records no preprocessing.
    if task == "classification":
        registry_entry["preprocessing"] = {
            "mean": [0.485, 0.456, 0.406],
            "std": [0.229, 0.224, 0.225],
            "input_range": [0.0, 1.0]
        }
    else:
        registry_entry["preprocessing"] = None

    return registry_entry


def main():
    """CLI entry point: resolve the model list from --models or --repo-id,
    download and analyze each model, then write JSON analysis and/or
    registry-format output files and print a summary."""
    parser = argparse.ArgumentParser(
        description="从 HuggingFace 下载 ONNX 模型并分析参数",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
示例用法:
  # 使用字典文件
  python scripts/download_and_analyze_models.py --models scripts/models.json --output models/onnx
  
  # 使用单个模型
  python scripts/download_and_analyze_models.py --repo-id onnx-community/resnet-50-ONNX --output models/onnx

models.json 格式:
{
  "resnet50": {
    "repo_id": "onnx-community/resnet-50-ONNX",
    "filename": "model.onnx"
  },
  "mobilenetv2": {
    "repo_id": "onnxmodelzoo/mobilenetv2_100_Opset17",
    "filename": "mobilenetv2_100_Opset17.onnx"
  }
}
        """
    )
    
    parser.add_argument(
        "--models",
        type=str,
        help="模型配置 JSON 文件路径（包含 {name: {repo_id, filename}} 字典）"
    )
    parser.add_argument(
        "--repo-id",
        type=str,
        help="单个模型的 HuggingFace 仓库 ID"
    )
    parser.add_argument(
        "--filename",
        type=str,
        default=None,
        help="模型文件名（如果为 None，自动查找）"
    )
    parser.add_argument(
        "--name",
        type=str,
        default=None,
        help="模型名称（用于单个模型下载）"
    )
    parser.add_argument(
        "--output",
        type=str,
        default="models/onnx",
        help="输出目录"
    )
    parser.add_argument(
        "--format",
        choices=["json", "registry", "both"],
        default="both",
        help="输出格式：json（详细信息）、registry（项目注册表格式）、both（两者）"
    )
    parser.add_argument(
        "--registry-output",
        type=str,
        default=None,
        help="Registry 格式输出文件路径（默认：{output}/model_registry.json）"
    )
    
    args = parser.parse_args()
    
    output_dir = Path(args.output)
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Resolve the model configuration: {name: {repo_id, filename, ...}}.
    models_config = {}
    
    if args.models:
        # Batch mode: load the mapping from a JSON file.
        with open(args.models, 'r', encoding='utf-8') as f:
            models_config = json.load(f)
    elif args.repo_id:
        # Single-model mode: build a one-entry mapping; the name defaults to
        # the last path component of the repo ID.
        model_name = args.name or args.repo_id.split('/')[-1]
        models_config = {
            model_name: {
                "repo_id": args.repo_id,
                "filename": args.filename
            }
        }
    else:
        print("❌ 错误: 必须提供 --models 或 --repo-id")
        parser.print_help()
        exit(1)
    
    # Download and analyze each configured model.
    all_results = {}
    registry_entries = {}
    
    print("=" * 80)
    print("开始下载和分析模型")
    print("=" * 80)
    print()
    
    for model_name, config in models_config.items():
        print(f"\n{'='*80}")
        print(f"📦 处理模型: {model_name}")
        print(f"{'='*80}")
        
        try:
            repo_id = config["repo_id"]
            filename = config.get("filename")
            # task/area are optional per-model fields; None is passed through
            # to format_for_registry when absent.
            task = config.get("task")
            area = config.get("area")
            # Download the model file.
            model_path = download_model(model_name,repo_id, filename, output_dir)
            
            if model_path is None:
                print(f"❌ 跳过模型 {model_name}：下载失败")
                continue
            
            # Analyze the downloaded model.
            print(f"\n🔍 分析模型...")
            model_info = analyze_model(model_path)
            
            # POSIX-style path string recorded in the registry entry.
            relative_path = str(model_path.as_posix())
            
            # Keep the full analysis result.
            all_results[model_name] = model_info
            
            # Optionally build the registry-format entry.
            if args.format in ["registry", "both"]:
                registry_entry = format_for_registry(model_info, model_name, relative_path, task, area)
                registry_entries[model_name] = registry_entry
            
            # Print a human-readable summary of the analysis.
            print(f"\n📊 模型摘要:")
            print(f"  文件路径: {model_path}")
            print(f"  文件大小: {model_info.get('file_size_mb')} MB")
            print(f"  SHA256: {model_info.get('sha256')}")
            print(f"  Opset 版本: {model_info.get('opset_version')}")
            print(f"  算子数量: {model_info.get('operator_count')}")
            
            print(f"\n📥 输入信息:")
            for inp in model_info.get("inputs", []):
                print(f"  - {inp['name']}: shape={inp['shape']}, type={inp['type']}")
            
            print(f"\n📤 输出信息:")
            for out in model_info.get("outputs", []):
                print(f"  - {out['name']}: shape={out['shape']}, type={out['type']}")
            
            dynamic_info = model_info.get("dynamic_batch", {})
            if dynamic_info.get("supports_dynamic_batch"):
                print(f"\n✅ 动态 Batch 支持: 是")
            else:
                print(f"\n❌ 动态 Batch 支持: 否")
            
        except Exception as e:
            # Record per-model failures and continue with the next model.
            print(f"\n❌ 处理模型 {model_name} 时出错: {e}")
            import traceback
            traceback.print_exc()
            all_results[model_name] = {
                "error": str(e),
                "traceback": traceback.format_exc()
            }
    
    # Write the collected results to disk.
    print("\n" + "=" * 80)
    print("保存结果")
    print("=" * 80)
    
    # Detailed analysis in JSON form.
    if args.format in ["json", "both"]:
        json_output = output_dir / "model_analysis.json"
        print(f"\n💾 保存详细分析结果: {json_output}")
        with open(json_output, 'w', encoding='utf-8') as f:
            json.dump(all_results, f, indent=2, ensure_ascii=False)
        print(f"✅ JSON 结果已保存: {json_output}")
    
    # Registry-format output (optional custom path via --registry-output).
    if args.format in ["registry", "both"]:
        if registry_entries:
            registry_output = Path(args.registry_output) if args.registry_output else output_dir / "model_registry.json"
            registry_output.parent.mkdir(parents=True, exist_ok=True)
            print(f"\n💾 保存 Registry 格式结果: {registry_output}")
            with open(registry_output, 'w', encoding='utf-8') as f:
                json.dump(registry_entries, f, indent=2, ensure_ascii=False)
            print(f"✅ Registry 结果已保存: {registry_output}")
        else:
            print("\n⚠️  没有可用的 Registry 格式数据")
    
    # Final success/failure summary.
    print("\n" + "=" * 80)
    print("处理完成")
    print("=" * 80)
    
    successful_models = [name for name, info in all_results.items() if "error" not in info]
    failed_models = [name for name, info in all_results.items() if "error" in info]
    
    print(f"\n📊 处理总结:")
    print(f"  总计: {len(all_results)} 个模型")
    print(f"  ✅ 成功: {len(successful_models)} 个")
    if failed_models:
        print(f"  ❌ 失败: {len(failed_models)} 个")
        for name in failed_models:
            print(f"    - {name}")
    
    if successful_models:
        print(f"\n✅ 成功处理的模型:")
        for name in successful_models:
            info = all_results[name]
            print(f"  - {name}: {info.get('file_size_mb', 'N/A')} MB, "
                  f"Opset {info.get('opset_version', 'N/A')}")
    
    print("\n✨ 所有操作完成!")


# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()