# encoding=utf-8

"""benchmark.py

    由于OpenVINO的benchmark需要编译，所以拆分main中的效率计算代码，并用openvino框架重新编写
"""
import time

import torch
import openvino as ov
import os
import subprocess
import re
from typing import List


def run_benchmark(model_path: str, shape: List[int]) -> float:
    """Run OpenVINO's ``benchmark_app`` on a model and return its throughput.

    Args:
        model_path: Path to the IR ``.xml`` file to benchmark.
        shape: Input shape, e.g. ``[1, 3, 64, 64]``.

    Returns:
        Throughput in FPS as reported by ``benchmark_app``.

    Raises:
        subprocess.CalledProcessError: If ``benchmark_app`` exits non-zero.
        RuntimeError: If no "Throughput: ... FPS" line is found in the output.
    """
    shape_arg = "[{}]".format(",".join(str(x) for x in shape))
    # List-form argv with shell=False: no shell quoting issues, and no command
    # injection through model_path / shape.
    command = [
        "benchmark_app",
        "-m", model_path,
        "-d", "CPU",
        "-api", "async",
        "-t", "15",
        "-shape", shape_arg,
    ]
    cmd_output = subprocess.check_output(command)  # nosec
    # Decode before matching: searching str(bytes) matches against the bytes
    # repr (b'...'), which is fragile and encoding-unaware.
    match = re.search(r"Throughput: (.+?) FPS",
                      cmd_output.decode("utf-8", errors="replace"))
    if match is None:
        raise RuntimeError(
            "benchmark_app output did not contain a 'Throughput: ... FPS' line"
        )
    return float(match.group(1))


def get_model_size(ir_path: str, m_type: str = "Mb") -> float:
    xml_size = os.path.getsize(ir_path)
    bin_size = os.path.getsize(os.path.splitext(ir_path)[0] + ".bin")
    for t in ["bytes", "Kb", "Mb"]:
        if m_type == t:
            break
        xml_size /= 1024
        bin_size /= 1024
    model_size = xml_size + bin_size
    return model_size



# Load both IR models and compile them for inference; "AUTO" lets the OpenVINO
# runtime pick the best available device at load time.
core = ov.Core()
fp32_model = core.compile_model(core.read_model("resnet18_fp32.xml"), device_name="AUTO")
int8_model = core.compile_model(core.read_model("resnet18_int8.xml"), device_name="AUTO")

IMAGE_SIZE = 64
# NOTE(review): BATCH_SIZE is not used anywhere in this script — presumably
# carried over from the training/quantization config; confirm before removing.
BATCH_SIZE = 128
input_shape = (1, 3, IMAGE_SIZE, IMAGE_SIZE)  # NCHW, single image
example_input = torch.randn(*input_shape).cpu()  # random dummy input for timing


"""
fp32 infer cost time: 0.005405299999999613
fp32 model size: 43.06930065155029MB
int8 infer cost time: 0.0022316000000000003
int8 model size: 10.944717407226562MB
"""

start = time.perf_counter()
out = fp32_model([example_input.numpy()])[fp32_model.output(0)]
print("fp32 infer cost time: {}".format(time.perf_counter() - start))
print("fp32 model size: {}MB".format(get_model_size("resnet18_fp32.xml")))

start = time.perf_counter()
out1 = int8_model([example_input.numpy()])[int8_model.output(0)]
print("int8 infer cost time: {}".format(time.perf_counter() - start))
print("int8 model size: {}MB".format(get_model_size("resnet18_int8.xml")))


"""model acc log
[Step 1] Prepare model and dataset
Accuracy@1 of original FP32 model: 55.52000045776367
Successfully downloaded and prepared dataset at: D:\Projects\p2024-alg-docker\ModelOptimize\nncf-example\cls-imagenet200-quantization-example\nncf\datasets\extracted\tiny-imagenet-200

[Step 2] Quantize model
Statistics collection ------------------------ 100% 300/300 • 0:00:15 • 0:00:00
INFO:nncf:Compiling and loading torch extension: quantized_functions_cuda...
INFO:nncf:Finished loading torch extension: quantized_functions_cuda
Applying Fast Bias correction ------------------ 100% 20/20 • 0:00:00 • 0:00:00
Validation: ------------------------------------ 100% 79/79 • 0:01:17 • 0:00:00
Accuracy@1 of initialized INT8 model: 54.935

[Step 3] Fine tune quantized model
Train epoch: 0
Fine tuning: --------------------------------- 100% 782/782 • 0:12:19 • 0:00:00
Validation: ------------------------------------ 100% 79/79 • 0:00:18 • 0:00:00
Accyracy@1 of INT8 model after 0 epoch finetuning: 56.992
Train epoch: 1
Fine tuning: --------------------------------- 100% 782/782 • 0:02:45 • 0:00:00
Validation: ------------------------------------ 100% 79/79 • 0:00:18 • 0:00:00
Accyracy@1 of INT8 model after 1 epoch finetuning: 57.130
Validation: ------------------------------------ 100% 79/79 • 0:00:19 • 0:00:00
Accuracy@1 of fine-tuned INT8 model: 57.130

"""