
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np
import pathlib

# Load the CIFAR-10 dataset (downloads on first use).
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize pixel values to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0

# One-hot encode the labels (10 classes).
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# Convert inputs to int8 for the quantized model.
# BUG FIX: the original `(x * 255).astype(np.int8)` overflowed for pixel
# values > 127 (int8 holds [-128, 127]), silently wrapping bright pixels to
# negative values. Map [0, 255] -> [-128, 127] with an explicit shift, which
# matches the conventional TFLite int8 input quantization (zero_point = -128).
# NOTE(review): confirm against the model's actual input scale/zero-point.
x_train = (np.round(x_train * 255.0) - 128).astype(np.int8)
x_test = (np.round(x_test * 255.0) - 128).astype(np.int8)
# # 创建深度可分离卷积神经网络模型（模仿MobileNetV1）
# model = models.Sequential()

# # 第一个卷积层：深度可分离卷积，stride=1，保持32x32尺寸
# model.add(layers.InputLayer(input_shape=(32, 32, 3)))
# model.add(layers.DepthwiseConv2D(3, padding='same', activation='relu', strides=1))  # 32x32 -> 32x32
# model.add(layers.BatchNormalization())
# model.add(layers.Conv2D(32, kernel_size=1, activation='relu'))

# # 第二个卷积层：深度可分离卷积，stride=2，尺寸从32x32降到16x16
# model.add(layers.DepthwiseConv2D(3, padding='same', activation='relu', strides=2))  # 32x32 -> 16x16
# model.add(layers.BatchNormalization())
# model.add(layers.Conv2D(64, kernel_size=1, activation='relu'))

# # 第三个卷积层：深度可分离卷积，stride=1，保持16x16尺寸
# model.add(layers.DepthwiseConv2D(3, padding='same', activation='relu', strides=1))  # 16x16 -> 16x16
# model.add(layers.BatchNormalization())
# model.add(layers.Conv2D(64, kernel_size=1, activation='relu'))

# # 第四个卷积层：深度可分离卷积，stride=2，尺寸从16x16降到8x8
# model.add(layers.DepthwiseConv2D(3, padding='same', activation='relu', strides=2))  # 16x16 -> 8x8
# model.add(layers.BatchNormalization())
# model.add(layers.Conv2D(128, kernel_size=1, activation='relu'))

# # 第五个卷积层：深度可分离卷积，stride=1，保持8x8尺寸
# model.add(layers.DepthwiseConv2D(3, padding='same', activation='relu', strides=1))  # 8x8 -> 8x8
# model.add(layers.BatchNormalization())
# model.add(layers.Conv2D(128, kernel_size=1, activation='relu'))

# # 全局平均池化，降维到1维
# model.add(layers.GlobalAveragePooling2D())

# # 全连接层
# model.add(layers.Dense(128, activation='relu'))
# model.add(layers.Dropout(0.5))
# model.add(layers.Dense(10, activation='softmax'))

# # 编译模型
# model.compile(optimizer='adam',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])

# # 输出模型结构
# model.summary()

# # 训练模型
# history = model.fit(x_train, y_train, epochs=150, batch_size=64, validation_data=(x_test, y_test))

# # 在测试集上评估模型
# test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
# print(f'Test accuracy: {test_acc}')

# # 保存训练好的模型为 .keras 格式
# saved_model_path = 'model_cifar10.keras'  # 保存路径
# model.save(saved_model_path)

# print(f"模型已保存为 .keras 格式，路径：{saved_model_path}")


# # 加载保存的 .h5 模型
# model = tf.keras.models.load_model('model_cifar10.keras')

# # 创建TFLite转换器
# converter = tf.lite.TFLiteConverter.from_keras_model(model)

# # 设置量化选项，进行整数量化
# converter.optimizations = [tf.lite.Optimize.DEFAULT]  # 优化选项
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]  # 设置仅支持int8运算
# converter.inference_input_type = tf.int8  # 输入量化为int8
# converter.inference_output_type = tf.int8  # 输出量化为int8

# # 代表性数据集，用于量化校准
# def representative_dataset_gen():
#     for i in range(100):
#         yield [x_train[i:i+1].astype(np.float32)]  # 每次返回1个数据进行校准

# converter.representative_dataset = representative_dataset_gen
# # THIS ONE
# # 执行转换
# tflite_model = converter.convert()

# # 创建一个 pathlib.Path 对象来保存文件
# tflite_model_path = pathlib.Path('model_quantized_cifar10.tflite')

# # 保存 TFLite 模型
# tflite_model_path.write_bytes(tflite_model)

# print("模型已保存为 TFLite 格式，路径：", tflite_model_path)

# Helper function to run inference on a TFLite model
def run_tflite_model(tflite_file, test_image_indices):
    """Run a TFLite model over selected CIFAR-10 test images.

    Args:
        tflite_file: Path to the .tflite model file.
        test_image_indices: Iterable of indices into the global ``x_test``.

    Returns:
        np.ndarray of predicted class indices (argmax of the model output),
        one entry per requested image.
    """
    global x_test, y_test  # CIFAR-10 test set prepared at module level

    # Initialize the interpreter
    interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]

    predictions = np.zeros((len(test_image_indices),), dtype=int)

    # Iterate over test images
    for i, test_image_index in enumerate(test_image_indices):
        test_image = x_test[test_image_index]
        test_label = y_test[test_image_index]
        print(f"test_label: {test_label}")

        # Work in float32 so the quantization arithmetic below is exact.
        test_image = test_image.astype(np.float32)

        # BUG FIX: the original only quantized when the input dtype was
        # uint8, but this model is converted with int8 inputs (see the
        # converter settings above), so the branch never ran and raw floats
        # were truncated straight to int8. Quantize for ANY integer input
        # dtype using the model's own scale/zero-point, and clip to the
        # dtype's representable range instead of a hard-coded [0, 255].
        input_dtype = input_details["dtype"]
        if np.issubdtype(input_dtype, np.integer):
            input_scale, input_zero_point = input_details["quantization"]
            if input_scale != 0:  # scale == 0 means the tensor is not quantized
                test_image = test_image / input_scale + input_zero_point
            info = np.iinfo(input_dtype)
            test_image = np.clip(test_image, info.min, info.max)

        # Add the batch dimension and cast to the exact input dtype.
        test_image = np.expand_dims(test_image, axis=0).astype(input_dtype)

        # Set the tensor (input) and run inference
        interpreter.set_tensor(input_details["index"], test_image)
        interpreter.invoke()

        # Get the output tensor
        output = interpreter.get_tensor(output_details["index"])[0]
        print(f"OUTPUT: {output}")
        # Predicted class = index of the largest (possibly quantized) score.
        predictions[i] = output.argmax()

    return predictions

# Example: run inference on the first 10 images of the test set.
NUM_SAMPLES = 10
test_image_indices = list(range(NUM_SAMPLES))
tflite_model_path = 'model_quantized_cifar10.tflite'

# Run the quantized model and report the predicted class indices.
predictions = run_tflite_model(tflite_model_path, test_image_indices)
print(predictions)



# Initialize the interpreter for layer-by-layer inspection.

try:
    interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
    interpreter.allocate_tensors()
    print("Model loaded and tensors allocated successfully.")
except Exception as e:
    print(f"Error loading model or allocating tensors: {e}")
    raise


# Input and output tensor metadata.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Details for every tensor in the graph (weights and activations).
tensor_details = interpreter.get_tensor_details()

# Run a single test image through the model.
test_image = x_test[0]
test_image = np.expand_dims(test_image, axis=0)  # add the batch dimension
# BUG FIX: the original hard-coded `.astype(np.int8)`; cast to the model's
# declared input dtype instead, so this keeps working if the model is
# re-exported with a different input type (e.g. uint8 or float32).
interpreter.set_tensor(input_details[0]['index'],
                       test_image.astype(input_details[0]['dtype']))
# Forward pass.
interpreter.invoke()



# Inspect every tensor in the graph after the forward pass.
intermediate_details = interpreter.get_tensor_details()

# BUG FIX: the original had `for detail in intermediate_details:` whose
# entire body was commented out — a for-loop with no statements is a
# SyntaxError (IndentationError), so the script could not even be parsed.
# The empty loop is removed; the loop below iterates the same details.
for tensor_detail in intermediate_details:
    tensor_index = tensor_detail['index']
    tensor_name = tensor_detail['name']
    # Reading an intermediate (non-constant) tensor can raise ValueError on
    # some TFLite versions when its buffer is not preserved after invoke();
    # skip those tensors instead of crashing.
    try:
        tensor_data = interpreter.get_tensor(tensor_index)
    except ValueError:
        continue

    # To dump each tensor to a text file, uncomment:
    # with open(f'{tensor_name}_weights_cifar10.txt', 'w') as f:
    #     f.write(f"Layer: {tensor_name}\n")
    #     f.write(f"Shape: {tensor_data.shape}\n")
    #     f.write("Weights:\n")
    #     np.savetxt(f, tensor_data.flatten(), fmt='%.6f')