###  Quantize the float model for deployment on the embedded (board-side) target
import numpy as np
import pathlib
# Locate this script's directory (absolute, symlinks not resolved).
parent_path = pathlib.Path(__file__).parent.absolute()
# Target directory of the embedded STM32 project; join the relative
# path and collapse the ".." components in one step.
out_path = (parent_path / "../../Arm_STM32_FreedomLearn/freedomlearn").resolve()
# Load the trained model parameters from an .npz archive.
def load_model_parameters(file_path):
    """Read the four network parameter arrays from *file_path* (.npz).

    Returns (w_i_h, w_h_o, b_i_h, b_h_o): input->hidden weights,
    hidden->output weights, and the two corresponding bias vectors.
    """
    archive = np.load(file_path)
    params = tuple(archive[key] for key in ("w_i_h", "w_h_o", "b_i_h", "b_h_o"))
    print(f"模型参数已从 {file_path} 加载")
    return params

# Symmetric linear quantization to signed 8-bit integers.
def quantize(data, bits=8):
    """Quantize *data* symmetrically to int8.

    Each element is mapped to round(x / max|x| * (2**(bits-1) - 1)),
    e.g. int8 uses the range [-127, 127].

    Args:
        data: array-like of floats to quantize.
        bits: nominal bit width of the target range.
            NOTE(review): the output dtype is hard-coded to np.int8, so
            values only fit for bits <= 8 — confirm before using other widths.

    Returns:
        (quantized_data, max_val): the int8 array and the scale factor
        (the maximum absolute value of the input) needed to dequantize.
    """
    data = np.asarray(data)
    max_val = np.max(np.abs(data))
    scale = (2 ** (bits - 1)) - 1  # e.g. int8 range is [-127, 127]
    if max_val == 0:
        # All-zero input: the original division would produce NaN and an
        # undefined NaN->int8 cast. Zeros quantize to zeros; a 0 scale
        # factor dequantizes back to zeros.
        return np.zeros(data.shape, dtype=np.int8), max_val
    quantized_data = np.round(data / max_val * scale).astype(np.int8)
    return quantized_data, max_val

# Load the trained float model parameters.
# NOTE(review): this path is relative to the current working directory,
# not to this file — the script presumably runs from the repo root; verify.
w_i_h, w_h_o, b_i_h, b_h_o = load_model_parameters("./NeuralNetworkFromScratch-main/result/model_parameters.npz")
# Quantize each weight/bias tensor independently (per-tensor scale factor).
w_i_h_quantized, w_i_h_scale = quantize(w_i_h)
w_h_o_quantized, w_h_o_scale = quantize(w_h_o)
b_i_h_quantized, b_i_h_scale = quantize(b_i_h)
b_h_o_quantized, b_h_o_scale = quantize(b_h_o)

# Save the quantized weights and their scale factors to a single .npz
# file next to this script, for later inspection / reuse from Python.
np.savez(
    f"{parent_path}/quantized_model_parameters.npz",
    w_i_h_quantized=w_i_h_quantized,
    w_h_o_quantized=w_h_o_quantized,
    b_i_h_quantized=b_i_h_quantized,
    b_h_o_quantized=b_h_o_quantized,
    w_i_h_scale=w_i_h_scale,
    w_h_o_scale=w_h_o_scale,
    b_i_h_scale=b_i_h_scale,
    b_h_o_scale=b_h_o_scale
)

print("量化后的权重和比例因子已保存到 quantized_model_parameters.npz")

# Write the quantized weights and scale factors into a single C file.
def _write_int8_array(f, name, values):
    """Emit `int8_t <name>[] = { ... };` to *f*, 10 values per line."""
    f.write(f"int8_t {name}[] = {{\n")
    for i, value in enumerate(values.flatten()):
        f.write(f"{value}, ")
        if (i + 1) % 10 == 0:  # wrap after every 10 values
            f.write("\n")
    f.write("\n};\n\n")

def save_quantized_parameters_to_c(file_path, w_i_h_quantized, w_h_o_quantized, b_i_h_quantized, b_h_o_quantized,
                                  w_i_h_scale, w_h_o_scale, b_i_h_scale, b_h_o_scale):
    """Write the four int8 parameter arrays and their float scale factors
    as C definitions to *file_path*.

    The file starts with `#include "FreeRTOS.h"` (presumably this pulls in
    the int8_t typedef on the target — verify against the firmware build).
    Array data is flattened row-major; dequantization on the device is
    expected to be value / 127 * scale.
    """
    with open(file_path, "w") as f:
        f.write('#include "FreeRTOS.h"\n')
        # The four quantized tensors, each as an int8_t C array.
        _write_int8_array(f, "w_i_h_quantized", w_i_h_quantized)
        _write_int8_array(f, "w_h_o_quantized", w_h_o_quantized)
        _write_int8_array(f, "b_i_h_quantized", b_i_h_quantized)
        _write_int8_array(f, "b_h_o_quantized", b_h_o_quantized)

        # Per-tensor scale factors as C float globals.
        f.write(f"float w_i_h_scale = {w_i_h_scale}f;\n")
        f.write(f"float w_h_o_scale = {w_h_o_scale}f;\n")
        f.write(f"float b_i_h_scale = {b_i_h_scale}f;\n")
        f.write(f"float b_h_o_scale = {b_h_o_scale}f;\n")

    print(f"量化后的权重和比例因子已保存到 {file_path}")

# Emit the quantized weights and scale factors as a C source file
# directly into the embedded project's source tree (out_path).
save_quantized_parameters_to_c(
    f"{out_path}/quantized_model_parameters.c",
    w_i_h_quantized, w_h_o_quantized, b_i_h_quantized, b_h_o_quantized,
    w_i_h_scale, w_h_o_scale, b_i_h_scale, b_h_o_scale
)