import tensorflow as tf
import numpy as np

# =========================
# Configuration: adjust to your environment
# =========================
SAVED_MODEL_DIR = r"C:\Users\nanak\Desktop\weather-classification\saved_model_with_signature"  # path to your SavedModel directory
OUTPUT_FP32 = "model_fp32.tflite"
OUTPUT_WEIGHT_ONLY = "model_weight_quant.tflite"
OUTPUT_INT8 = "best_int8.tflite"

# Representative dataset used by the TFLite converter to calibrate
# activation quantization ranges.
# NOTE(review): this currently yields random noise as a placeholder —
# replace with real preprocessed samples from the training set, otherwise
# the INT8 scale factors will not match real-data activation statistics.
def representative_dataset(num_samples=10, input_shape=(1, 224, 224, 3)):
    """Yield calibration batches for full-integer quantization.

    Args:
        num_samples: number of calibration batches to yield
            (default 10, just enough to verify the pipeline runs).
        input_shape: shape of each yielded tensor, batch dimension first.

    Yields:
        A one-element list containing a float32 numpy array of
        ``input_shape`` — the format ``converter.representative_dataset``
        expects (one list entry per model input).
    """
    for _ in range(num_samples):
        sample = np.random.rand(*input_shape).astype(np.float32)
        yield [sample]

# Enable verbose logging so it's easy to see which conversion step stalls.
tf.get_logger().setLevel('INFO')


# =========================
# Step 1: FP32 转换（验证流程）
# =========================
# print("\n=== Step 1: FP32 转换 ===")
# converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)
# tflite_model = converter.convert()
# with open(OUTPUT_FP32, "wb") as f:
#     f.write(tflite_model)
# print(f"✅ FP32 模型已保存到 {OUTPUT_FP32}")


# =========================
# Step 2: 权重量化（不量化激活）
# =========================
# print("\n=== Step 2: 权重量化（不量化激活） ===")
# converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)
# converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
# try:
#     tflite_model = converter.convert()
#     with open(OUTPUT_WEIGHT_ONLY, "wb") as f:
#         f.write(tflite_model)
#     print(f"✅ 权重量化模型已保存到 {OUTPUT_WEIGHT_ONLY}")
# except Exception as e:
#     print(f"❌ 权重量化失败：{e}")


# =========================
# Step 3: full INT8 quantization with a small calibration set
# =========================
print("\n=== Step 3: 小样本全量化（INT8） ===")
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)
# DEFAULT + a representative_dataset triggers full integer quantization.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
# Restrict to INT8 builtin ops so conversion fails loudly if any op
# cannot be quantized, instead of silently falling back to float.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8  # or tf.uint8, depending on your deployment target
converter.inference_output_type = tf.int8
try:
    tflite_model = converter.convert()
    with open(OUTPUT_INT8, "wb") as f:
        f.write(tflite_model)
    print(f"✅ INT8 模型已保存到 {OUTPUT_INT8}")
except Exception as e:
    # Best-effort script: report the failure and exit normally so the
    # earlier (commented-out) steps can be re-enabled for debugging.
    print(f"❌ INT8 量化失败：{e}")
