import time
import tensorrt as trt
from tqdm import trange
import numpy as np

import pycuda.driver as cuda
from torchvision.transforms.functional import to_tensor
from PIL import Image

import common
import argparse

def _parse_args():
    """Build and parse the benchmark's command-line options."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--batch-size', type=int, default=3)
    cli.add_argument('--mode', type=str, default='fp16', choices=['fp32', 'fp16'])
    return cli.parse_args()


args = _parse_args()
batch_size = args.batch_size
mode = args.mode
print("batch size:", batch_size, "mode:", mode)

# Logger severity can be raised to suppress messages or lowered to show more.
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Flag bit for creating networks in explicit-batch mode.
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)


class ModelData(object):
    """Locations of model artifacts used by the benchmark."""

    # Serialized TensorRT engine, keyed by precision mode and batch size,
    # e.g. ./models/resnet101_base_fp16_3.trt
    MODEL_FILE = f"./models/resnet101_base_{mode}_{batch_size}.trt"


def build_engine(model_file, file_type='TRT'):
    """Load a serialized TensorRT engine from disk.

    Args:
        model_file: Path to the serialized engine file.
        file_type: Serialization format; only 'TRT' (a serialized CUDA
            engine) is supported.

    Returns:
        A deserialized ``trt.ICudaEngine``.

    Raises:
        ValueError: If ``file_type`` is not 'TRT'. (Previously the function
            silently returned ``None``, which made the caller fail later
            with an opaque ``AttributeError``.)
    """
    if file_type != 'TRT':
        raise ValueError(f"unsupported file_type: {file_type!r} (expected 'TRT')")
    with open(model_file, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
        return runtime.deserialize_cuda_engine(f.read())


def get_input_pair_batch(img_path, batch_size):
    """Load the source and background images and tile each into a batch.

    Args:
        img_path: Mapping with 'src' and 'bgr' keys holding image file paths.
        batch_size: Number of copies of each image in the returned batch.

    Returns:
        Dict with 'src' and 'bgr' keys, each a float32 NCHW numpy array of
        shape (batch_size, C, H, W).
    """
    def _load_tiled(path):
        # to_tensor yields CHW in [0, 1]; replicate along a new batch axis.
        single = to_tensor(Image.open(path)).unsqueeze(0)
        return single.repeat(batch_size, 1, 1, 1).numpy().astype(np.float32)

    return {key: _load_tiled(img_path[key]) for key in ('src', 'bgr')}


def do_benchmark():
    """Benchmark repeated inference on the pre-built TensorRT engine.

    Deserializes the engine named by ``ModelData.MODEL_FILE``, binds one
    (src, bgr) input batch, copies it to the device once, then runs 1000
    inference iterations; throughput is reported via tqdm's iteration rate.
    Host->device transfer happens outside the timed loop, so the loop
    measures inference only.
    """
    with build_engine(ModelData.MODEL_FILE, file_type='TRT') as engine:
        print(engine)
        print("max batch size:", engine.max_batch_size)
        with engine.create_execution_context() as context:
            batch = get_input_pair_batch({'src': "./images/src/0.jpg", 'bgr': "./images/bgr/0.jpg"}, batch_size)

            # Dynamic-shape engines require selecting an optimization profile
            # and fixing the input binding shapes before buffer allocation.
            context.active_optimization_profile = 0
            context.set_binding_shape(0, (batch_size, 3, 1080, 1920))
            context.set_binding_shape(1, (batch_size, 3, 1080, 1920))

            inputs, outputs, bindings, stream = common.allocate_buffers(engine, context)
            inputs[0].host = batch['src']
            inputs[1].host = batch['bgr']

            # Upload inputs once, up front. (Was a list comprehension used
            # purely for side effects; a plain loop states the intent.)
            for inp in inputs:
                cuda.memcpy_htod(inp.device, inp.host)

            for _ in trange(1000):
                common.do_only_inference(context, bindings=bindings)
                # print(time.time() - t)


# Script entry point: run the benchmark only when executed directly.
if __name__ == '__main__':
    do_benchmark()
