import numpy as np
import tensorflow as tf

class Quantizer:
    """Post-training weight quantizer for Keras models.

    Loads a saved Keras model, quantizes the kernels of Dense/Conv2D
    layers to signed ``output_bits``-wide integers with an asymmetric
    (affine) scheme, and dumps the quantized weights as a C header for
    embedded targets.
    """

    def __init__(self, model_path, output_bits=8):
        """Load the model to be quantized.

        Args:
            model_path: Any path accepted by ``tf.keras.models.load_model``.
            output_bits: Bit width of the quantized weights (default 8,
                i.e. ``int8`` output).
        """
        self.model = tf.keras.models.load_model(model_path)
        self.output_bits = output_bits

    def quantize_layer(self, layer):
        """Quantize a layer's kernel to signed ``output_bits`` integers.

        Asymmetric affine scheme: values are mapped onto the unsigned
        range ``[0, 2**bits - 1]`` using a scale and zero point derived
        from the kernel's min/max, then shifted by ``2**(bits - 1)`` so
        the codes fit the signed C type emitted by ``convert_model``.

        Args:
            layer: Any object exposing ``get_weights()`` whose first
                entry is the kernel array. The bias (index 1), if
                present, is deliberately ignored.

        Returns:
            ``np.int8`` array with the same shape as the kernel.
        """
        # Quantize the kernel only: get_weights()[0] is the kernel for
        # Dense/Conv2D; index 1 would be the bias, which has a different
        # shape and must not be folded into the min/max statistics.
        kernel = np.asarray(layer.get_weights()[0])
        w_min = float(np.min(kernel))
        w_max = float(np.max(kernel))

        levels = 2 ** self.output_bits - 1
        # Guard against a constant kernel: a zero scale would divide by zero.
        scale = (w_max - w_min) / levels if w_max > w_min else 1.0
        zero_point = int(round(-w_min / scale))

        # Affine quantization to [0, levels], then shift into the signed
        # range so the codes fit int8 when output_bits == 8.
        codes = np.round(kernel / scale) + zero_point
        codes = np.clip(codes, 0, levels) - 2 ** (self.output_bits - 1)
        return codes.astype(np.int8)

    def convert_model(self):
        """Quantize all Dense/Conv2D kernels and write ``model_params.h``.

        The in-memory model is left untouched — its float kernels remain
        valid (and would be needed to recover scale/zero-point if
        dequantization support is added later); only the generated header
        holds the int8 data, one flat array per quantized kernel.
        """
        quantized = [
            self.quantize_layer(layer)
            for layer in self.model.layers
            if isinstance(layer, (tf.keras.layers.Dense, tf.keras.layers.Conv2D))
        ]

        # Emit one flat int8_t array per quantized kernel.
        with open('model_params.h', 'w') as f:
            f.write("#pragma once\n")
            for i, w in enumerate(quantized):
                values = ','.join(map(str, w.flatten()))
                f.write(f"const int8_t WEIGHT_{i}[] = {{{values}}};\n")