import pickle
import sys
import numpy as np
import onnx
import onnxruntime as rt
import site

# Add custom module search paths so the ESP-DL tool modules below can be imported
site.addsitedir("./inc")
site.addsitedir("./inc/linux/")

# Import the optimizer, calibrator, and evaluator modules (ESP-DL tooling)
import optimizer
import calibrator
import evaluator

# Constant definitions
MODEL_COEFFICIENT = 'mnist_coefficient'  # base name for the exported C++ coefficient files
TARGET_DEVICE = 'esp32s3'                # deployment target chip

# File path configuration
mnist_test_data_path = './data/mnist_test_data.pickle'  # pickled (images, labels) test set
model_path = './data/model.onnx'                        # input floating-point ONNX model
pickle_file_path = './data/mnist_calib.pickle'          # output quantization table

# Optimize the ONNX model
def optimize_model(model_path):
    """Run the ESP-DL floating-point optimizer on the model at `model_path`.

    Returns the path of the optimized ONNX model written by the optimizer.
    """
    result_path = optimizer.optimize_fp_model(model_path)
    print(f"optimized_model_path: {result_path}")
    return result_path

# Load and preprocess the test data
def load_test_data(test_data_path):
    """Load the MNIST test set from a pickle file and normalize it.

    Returns a tuple (images, labels) where the images are scaled to
    [0, 1] and given a trailing channel axis: [B, W, H] -> [B, W, H, 1].
    """
    with open(test_data_path, 'rb') as f:
        images, labels = pickle.load(f)
    # Scale raw uint8 pixel values into the [0, 1] range.
    scaled = images / 255.0
    # Append a channel dimension of size 1.
    return np.expand_dims(scaled, axis=3), labels

# Prepare the calibration dataset
def prepare_calib_dataset(test_images, step=50, num_samples=5000):
    """Subsample calibration data: every `step`-th image among the first `num_samples`."""
    return test_images[:num_samples:step]

# Generate the quantization table
def generate_quantization_table(model_proto, calib_dataset, pickle_file_path):
    """Build an int16 per-tensor quantization table and export C++ coefficients.

    The table is written to `pickle_file_path`; the quantized coefficients
    are exported as C++ source for TARGET_DEVICE under the current directory.
    """
    print('Generating the quantization table:')
    quant_calib = calibrator.Calibrator('int16', 'per-tensor', 'minmax')
    quant_calib.set_providers(['CPUExecutionProvider'])
    print("generate_quantization_table Input data shape:", calib_dataset.shape)
    # Derive per-tensor min/max ranges from the calibration samples.
    quant_calib.generate_quantization_table(model_proto, calib_dataset, pickle_file_path)
    # Export the coefficients as C++ code using the configured model name.
    quant_calib.export_coefficient_to_cpp(model_proto, pickle_file_path, TARGET_DEVICE, '.', MODEL_COEFFICIENT, True)

# Evaluate quantized-model performance
def evaluate_performance(test_images, test_labels, model_proto, optimized_model_path, pickle_file_path):
    """Compare quantized-model accuracy against the fp32 reference on the test set.

    Args:
        test_images: normalized test images, [B, W, H, 1] (from load_test_data).
        test_labels: ground-truth class labels, shape [B].
        model_proto: loaded ONNX ModelProto of the optimized model.
        optimized_model_path: path of the optimized ONNX file (fed to onnxruntime).
        pickle_file_path: path of the quantization table generated earlier.

    Raises:
        ValueError: if there are fewer test samples than one batch.
    """
    print(f'Evaluating the performance on {TARGET_DEVICE}:')
    eva = evaluator.Evaluator('int16', 'per-tensor', TARGET_DEVICE)
    eva.set_providers(['CPUExecutionProvider'])
    # Build the quantized model from the previously generated quantization table.
    eva.generate_quantized_model(model_proto, pickle_file_path)

    # fp32 reference inference session.
    output_names = [n.name for n in model_proto.graph.output]
    providers = ['CPUExecutionProvider']
    m = rt.InferenceSession(optimized_model_path, providers=providers)

    # Evaluate accuracy batch by batch; any trailing partial batch is skipped.
    batch_size = 100
    batch_num = len(test_images) // batch_size
    if batch_num == 0:
        # Guard against ZeroDivisionError below when the test set is too small.
        raise ValueError(f'Need at least {batch_size} test samples, got {len(test_images)}')
    quant_correct = 0
    fp32_correct = 0
    input_name = m.get_inputs()[0].name

    for i in range(batch_num):
        batch_start = i * batch_size
        batch_end = (i + 1) * batch_size
        batch_images = test_images[batch_start:batch_end]
        batch_labels = test_labels[batch_start:batch_end]

        # Quantized-model inference ("evalute" spelling is the ESP-DL API name).
        outputs, _ = eva.evalute_quantized_model(batch_images, False)
        quant_correct += np.sum(np.argmax(outputs[0], axis=1) == batch_labels)

        # fp32 reference inference.
        fp_outputs = m.run(output_names, {input_name: batch_images.astype(np.float32)})
        fp32_correct += np.sum(np.argmax(fp_outputs[0], axis=1) == batch_labels)

    # BUG FIX: only batch_num * batch_size samples are evaluated; dividing by
    # len(test_images) under-reported accuracy whenever the test-set size was
    # not a multiple of batch_size.
    evaluated = batch_num * batch_size
    quant_accuracy = quant_correct / evaluated
    fp32_accuracy = fp32_correct / evaluated
    # Message fixed: the Evaluator above is configured for int16, not int8.
    print(f'Accuracy of int16 model is: {quant_accuracy:.4f}')
    print(f'Accuracy of fp32 model is: {fp32_accuracy:.4f}')

# Main workflow
def main():
    """End-to-end pipeline: optimize, load data, calibrate, quantize, evaluate."""
    # Step 1: optimize the floating-point ONNX model.
    optimized_model_path = optimize_model(model_path)

    # Step 2: load and normalize the MNIST test set.
    test_images, test_labels = load_test_data(mnist_test_data_path)
    print(f'Test images shape: {test_images.shape}')

    # Step 3: subsample a calibration dataset from the test images.
    calib_dataset = prepare_calib_dataset(test_images)

    # Step 4: load the optimized model and generate the quantization table.
    model_proto = onnx.load(optimized_model_path)
    generate_quantization_table(model_proto, calib_dataset, pickle_file_path)

    # Step 5: compare quantized vs. floating-point accuracy.
    evaluate_performance(test_images, test_labels, model_proto, optimized_model_path, pickle_file_path)

if __name__ == '__main__':
    main()
