import argparse
import os
import sys
from typing import Optional

from onnx2tf import convert

def parse_args():
    """Build and evaluate the command-line interface.

    Returns:
        argparse.Namespace with attributes ``model_path``, ``output``,
        ``quantize`` and ``img_size`` (a two-int [H, W] list or None).
    """
    cli = argparse.ArgumentParser(
        description="Convert ONNX to TFLite with optional quantization",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Required I/O locations.
    cli.add_argument("--model_path", type=str, required=True,
                     help="Input ONNX model path")
    cli.add_argument("--output", type=str, required=True,
                     help="Output directory for TFLite model")

    # Optional quantization mode; defaults to no quantization.
    cli.add_argument("--quantize", type=str,
                     choices=["none", "dynamic_range", "per_channel"],
                     default="none",
                     help="Quantization type")

    # Optional calibration image size, given as two ints: height width.
    cli.add_argument("--img_size", type=int, nargs=2, metavar=("H", "W"),
                     help="Input image dimensions for quantization calibration")

    return cli.parse_args()

def main():
    """CLI entry point: parse arguments, build the onnx2tf config, and convert.

    Exits with status 1 (via sys.exit) if the conversion raises.
    """
    args = parse_args()

    # Make sure the output directory exists before conversion writes into it.
    os.makedirs(args.output, exist_ok=True)

    # Base conversion parameters.
    # NOTE: "allow_custom_ops": True can be added here to permit custom operators.
    convert_config = {
        "input_onnx_file_path": args.model_path,
        "output_folder_path": args.output,
        "quant_type": args.quantize,
    }

    # Dynamic-shape support configuration, enabled only when a calibration
    # image size was supplied.
    if args.img_size:
        # NOTE(review): the original comment claimed 3 channels, but the shape
        # appends a trailing 1 — confirm the intended channel count against
        # the model's actual input layout.
        convert_config.update({
            "input_shape_override": [1, *args.img_size, 1],
            "disable_strict_mode": True,
            "dynamic_axes": {
                "input": {0: "batch_size", 1: "time_steps"},
                "cache": {0: "batch_size", 2: "padding"},
                "output": {0: "batch_size", 1: "time_steps"},
                "new_cache": {0: "batch_size", 2: "padding"}
            }
        })

    # Run the conversion; this is the top-level error boundary for the script.
    try:
        convert(**convert_config)
        print("✅ Conversion completed successfully!")
        print(f"TFLite model available at: {os.path.join(args.output, 'saved_model.tflite')}")

        # Remind the user that per-channel quantization needs calibration data.
        if args.quantize == "per_channel":
            print("ℹ️ Per-channel quantization requires calibration data. "
                  "Recommended to provide representative dataset using --img_size")

    except Exception as e:
        # Report the failure and exit non-zero so callers/CI can detect it.
        # sys.exit is used instead of the site-provided builtin exit().
        print(f"❌ Conversion failed: {e}")
        sys.exit(1)

# Script entry point: only run the conversion when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
