import math

import numpy as np
import time
import tensorrt as trt

import cv2

from dataset import ImagesDataset, ZipDataset
from dataset import augmentation as A
from torchvision import transforms as T
from torch import nn

import common
# Inference configuration: images per batch, and the precision the serialized
# engine was built with (both are baked into the engine file name below).
batch_size = 3
mode = 'fp16'

# You can set the logger severity higher to suppress messages (or lower to display more messages).
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# Network-creation flag: explicit-batch mode, required for ONNX parsing and
# dynamic input shapes.
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)


class ModelData(object):
    # Path of the serialized TensorRT engine; the file name encodes the
    # precision mode and batch size configured at module level.
    MODEL_FILE = f"./models/resnet101_base_{mode}_{batch_size}.trt"


def build_engine(model_file, file_type='TRT'):
    """Load a serialized TensorRT engine, or build one from an ONNX model.

    Args:
        model_file: Path to a serialized .trt engine (file_type='TRT') or to
            an ONNX model file (any other file_type).
        file_type: 'TRT' to deserialize a pre-built engine; otherwise the
            file is parsed as ONNX and an engine is built from it.

    Returns:
        A tensorrt.ICudaEngine, or None if ONNX parsing fails.
    """
    if file_type == 'TRT':
        with open(model_file, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())

    # For more information on TRT basics, refer to the introductory samples.
    with trt.Builder(TRT_LOGGER) as builder, \
            builder.create_network(EXPLICIT_BATCH) as network, \
            trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_batch_size = batch_size

        # Parse the ONNX network. Surface parser errors instead of silently
        # building an engine from a half-parsed (possibly empty) network.
        with open(model_file, 'rb') as model:
            if not parser.parse(model.read()):
                for err_idx in range(parser.num_errors):
                    print(parser.get_error(err_idx))
                return None

        config = builder.create_builder_config()
        # The workspace limit belongs on the builder config; setting it on the
        # builder is deprecated and ignored when building with a config.
        config.max_workspace_size = common.GiB(15)
        # Optimization-profile dimensions must be concrete: -1 is only legal
        # in the network's dynamic input definition, not in min/opt/max.
        profile = builder.create_optimization_profile()
        profile.set_shape('src', (1, 3, 224, 224), (batch_size, 3, 1080, 1920), (batch_size, 3, 2160, 3840))
        profile.set_shape('bgr', (1, 3, 224, 224), (batch_size, 3, 1080, 1920), (batch_size, 3, 2160, 3840))
        config.add_optimization_profile(profile)
        return builder.build_engine(network, config)


# Loads a test case into the provided pagelocked_buffer.
# Loads a test case into the provided pagelocked_buffer.
def load_normalized_test_case(data_paths, pagelocked_buffer):
    """Fill *pagelocked_buffer* with a random test image and return the image.

    Args:
        data_paths: Unused placeholder, kept for interface compatibility.
        pagelocked_buffer: Flat float32 host buffer with room for
            1 * 3 * 256 * 256 values; receives the image data in place.

    Returns:
        The generated (1, 3, 256, 256) float32 array.
    """
    img = np.random.rand(1, 3, 256, 256).astype(np.float32)
    # Copy into the caller's buffer. Rebinding the parameter name
    # (pagelocked_buffer = img) would be a no-op outside this function,
    # leaving the pagelocked memory untouched.
    np.copyto(pagelocked_buffer, img.ravel())
    return img


def load_input_pair(input_images_path):
    """Read each image path with OpenCV and return 1xCxHxW float32 arrays."""
    loaded = []
    for path in input_images_path:
        frame = cv2.imread(path)
        # Add a leading batch axis, then reorder HWC -> CHW.
        nchw = np.transpose(frame[np.newaxis, ...], (0, 3, 1, 2))
        loaded.append(np.ascontiguousarray(nchw.astype(np.float32)))

    return loaded


def load_input_pair_v2(data_pair):
    """Convert a (src, bgr) tensor pair into 1-batched float32 numpy arrays.

    Each element of *data_pair* must expose ``.numpy()`` (e.g. a CPU torch
    tensor); the result is a two-element list [src, bgr] with a leading
    batch axis added.
    """
    return [tensor.numpy().astype(np.float32)[np.newaxis, ...]
            for tensor in (data_pair[0], data_pair[1])]

def get_input_pair_batches(data_set, batch_size):
    """Group (src, bgr) pairs from *data_set* into stacked numpy batches.

    Returns a list of dicts, each with 'src' and 'bgr' arrays holding up to
    *batch_size* items; the final batch may be smaller when the dataset
    length is not a multiple of *batch_size*. Items must expose ``.numpy()``
    (e.g. CPU torch tensors).
    """
    total = len(data_set)
    batches = []
    for start in range(0, total, batch_size):
        src_items, bgr_items = [], []
        for idx in range(start, min(start + batch_size, total)):
            pair = data_set[idx]
            src_items.append(pair[0].numpy().astype(np.float32))
            bgr_items.append(pair[1].numpy().astype(np.float32))
        batches.append({'src': np.array(src_items), 'bgr': np.array(bgr_items)})

    return batches

def do_infer(data_set):
    """Run batched TensorRT inference over *data_set* and write result images.

    Deserializes the engine named by ModelData.MODEL_FILE, feeds (src, bgr)
    pairs through it in batches of `batch_size` at a fixed 1080p resolution,
    and writes one PNG per item to ./output/test<batch>_<item>.png.
    """
    with build_engine(ModelData.MODEL_FILE, file_type='TRT') as engine:
        # Build an engine, allocate buffers and create a stream.
        print(engine)
        print("max batch size:", engine.max_batch_size)
        print(engine.get_profile_shape(0, 0))
        # inputs, outputs, bindings, stream = common.allocate_buffers(engine)
        with engine.create_execution_context() as context:
            # Ishape = load_normalized_test_case('test.jpeg', pagelocked_buffer=inputs[0].host)
            # set_2nd_para(inputs[1].host)
            batches = get_input_pair_batches(data_set, batch_size)
            for b_idx in range(len(batches)):
                # input_imgs = load_input_pair_v2(data_set[idx])

                # Select optimization profile 0 and pin both dynamic inputs to
                # a fixed 1080p shape. NOTE(review): the final batch from
                # get_input_pair_batches may hold fewer than batch_size items,
                # while the binding shape is always batch_size — confirm the
                # dataset length is a multiple of batch_size.
                context.active_optimization_profile = 0
                context.set_binding_shape(0, (batch_size, 3, 1080, 1920))
                context.set_binding_shape(1, (batch_size, 3, 1080, 1920))

                for binding in range(len(engine)):
                    x = context.get_binding_shape(binding)
                    print("binding", binding, x)

                # Buffers are (re)allocated per batch for the resolved shapes.
                inputs, outputs, bindings, stream = common.allocate_buffers(engine, context)
                inputs[0].host = batches[b_idx]['src']
                inputs[1].host = batches[b_idx]['bgr']

                # Single timed inference pass (loop kept for easy benchmarking).
                for i in range(1):
                    t = time.time()
                    keys = common.do_inference(context, bindings=bindings, inputs=inputs, outputs=outputs,
                                               stream=stream,
                                               batch_size=batch_size)
                    print(time.time() - t)

                # keys is the list of output host buffers; presumably index 0
                # is alpha (pha) and index 2 is foreground (fgr), scaled to
                # 0-255 — verify against the engine's output binding order.
                pha, fgr = keys[0], keys[2] * 255
                pha = pha.reshape(batch_size, 1, 1080, 1920)
                # NOTE(review): this concatenates along axis 0 (batch), not
                # the channel axis; pha is unused below (the masking line is
                # commented out), so it currently has no effect.
                pha = np.concatenate((pha, pha, pha))
                # imgs = fgr * np.not_equal(pha, 0)
                imgs = fgr.reshape(batch_size, 3, 1080, 1920)
                for im_idx in range(len(imgs)):
                    # CHW -> HWC for OpenCV before writing to disk.
                    img = np.transpose(imgs[im_idx], (1, 2, 0))
                    cv2.imwrite("./output/test" + str(b_idx) + '_'+ str(im_idx) + ".png", img)


if __name__ == '__main__':
    # Per-pair transform pipeline: the identity module is a placeholder slot
    # for extra preprocessing, followed by PIL->tensor conversion.
    pair_transforms = A.PairCompose([
        A.PairApply(nn.Identity()),
        A.PairApply(T.ToTensor())
    ])
    # Zip source frames with their matching backgrounds, one-to-one.
    src_images = ImagesDataset("./images/src")
    bgr_images = ImagesDataset("./images/bgr")
    dataset = ZipDataset(
        [src_images, bgr_images],
        assert_equal_length=True,
        transforms=pair_transforms,
    )

    do_infer(dataset)
