import torch
from mobilenetv2_mini import mobilenetv2
from fuse_bn import fuse_module
from utils import save_tensor_as_text, save_tensor_as_c_header, c_header_collect, float_tensor2Q88
import os
from os import path

SAVED_MODEL = "mobilenetv2_on_cifar10_mini.pth"
TXT_PARAM_DIR = "txt_param"
C_PARAM_DIR = "c_param"
C_PARAM_DIR_INT16 = "c_param_int16"

if __name__ == '__main__':
    # Create all output directories up front; exist_ok avoids the
    # exists()/mkdir() check-then-act race of the naive pattern.
    for out_dir in (TXT_PARAM_DIR, C_PARAM_DIR, C_PARAM_DIR_INT16):
        os.makedirs(out_dir, exist_ok=True)

    # Load the trained model, then fold BatchNorm layers into the preceding
    # layers so only the fused weights/biases get exported.
    net = mobilenetv2()
    net.load_state_dict(torch.load(SAVED_MODEL))
    net = fuse_module(net)

    for n, p in net.named_parameters():
        print(n, p.shape)
        # Float exports: one plain-text file and one C header per parameter.
        save_tensor_as_text(p, path.join(TXT_PARAM_DIR, n + ".txt"))
        save_tensor_as_c_header(p, n, C_PARAM_DIR)

        #################################################################
        # NOTE (INT16 / Q8.8 quantization): biases must be pre-multiplied
        # by 256 (equivalent to an integer left shift by 8 bits) so that
        # subsequent fixed-point operations remain correct.
        #################################################################
        # A 1-D parameter is a bias (weights stay >= 2-D after BN fusion).
        save_tensor_as_c_header(
            float_tensor2Q88(p * 256 if p.dim() == 1 else p),
            n, C_PARAM_DIR_INT16)

    c_header_collect(C_PARAM_DIR, "mobilenetv2_param.h")
    c_header_collect(C_PARAM_DIR_INT16, "mobilenetv2_param.h")