import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import cv2

USE_FP16 = False
# Engine I/O dtype: an FP16 engine needs FP16 host buffers to enable the FP16 path.
target_dtype = np.float16 if USE_FP16 else np.float32

BATCH_SIZE = 10
# Load the sample image, convert BGR (OpenCV default) -> RGB, resize to the model input size.
img = cv2.imread("./img/drawing.jpg")[:,:,::-1]
img = cv2.resize(img,(224,224))
# Replicate the single image BATCH_SIZE times and scale by 255.
# NOTE(review): cv2.imread already yields 0-255 pixel values, so this produces
# values up to 65025 — presumably matches the engine's expected preprocessing;
# confirm against how the model was trained/exported.
input_batch = 255*np.array(np.repeat(np.expand_dims(np.array(img, dtype=np.float32), axis=0), BATCH_SIZE, axis=0), dtype=np.float32)
input_batch = input_batch.astype(target_dtype)

# Deserialize the serialized TensorRT engine. Use a context manager so the
# file handle is closed once the bytes are read (the original leaked it).
runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
with open("./data/nsfw_engine.trt", "rb") as f:
    engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()

output = np.empty([BATCH_SIZE, 2], dtype = target_dtype) # Need to set output dtype to FP16 to enable FP16

# Allocate device memory sized for exactly one BATCH_SIZE input/output pair;
# every later inference reuses these fixed buffers.
d_input = cuda.mem_alloc(1 * input_batch.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()


def predict(batch):
    """Run one fixed-size inference batch through the TensorRT engine.

    Args:
        batch: numpy array of input images already converted to ``target_dtype``;
            its total byte size must not exceed the preallocated device buffer
            (which was sized for BATCH_SIZE images).

    Returns:
        numpy array of shape (BATCH_SIZE, 2) holding the engine outputs.

    Raises:
        ValueError: if ``batch`` is larger than the preallocated device buffer
            (previously this silently overflowed device memory).
    """
    # Guard: the device buffers are allocated once, for BATCH_SIZE images.
    if batch.nbytes > input_batch.nbytes:
        raise ValueError(
            "batch of {} bytes exceeds the {} -byte device buffer allocated for "
            "BATCH_SIZE={} images".format(batch.nbytes, input_batch.nbytes, BATCH_SIZE))
    # Fresh host output buffer per call (renamed so it no longer shadows the
    # module-level `output`). FP16 dtype here is required to enable FP16.
    out = np.empty([BATCH_SIZE, 2], dtype=target_dtype)
    # Transfer input data to device
    cuda.memcpy_htod_async(d_input, batch, stream)
    # Execute model
    context.execute_async_v2(bindings, stream.handle, None)
    # Transfer predictions back
    cuda.memcpy_dtoh_async(out, d_output, stream)
    # Synchronize: wait for the async copies/execution to finish before returning
    stream.synchronize()
    return out

# print("Warming up...")
#
# trt_predictions = predict(input_batch).astype(np.float32)
# print(trt_predictions)
#
# print("Done warming up!")

from image_utils import create_vidio_image_loader
import time
import cv2

class NSFW:
    """Runs the TensorRT NSFW classifier over frames sampled from a video."""

    def __init__(self):
        # Project-provided preprocessing: turns a list of frames into a model input batch.
        self.fn_load_image = create_vidio_image_loader()
        self.model = predict
        # Batch size the device buffers were allocated for.
        self.batch = BATCH_SIZE

    def predictVidio(self, vidio_path):
        """Decode the video at ``vidio_path``, batch sampled frames, and classify them.

        Frames are accumulated into batches of ``self.batch`` (the size the
        device buffers were allocated for — the original hard-coded 20 here,
        overflowing buffers sized for BATCH_SIZE=10) and run through the model;
        per-batch predictions are printed. Returns None.
        """
        sample_rate = 1  # classify every frame; raise to sample every Nth frame
        cap = cv2.VideoCapture(vidio_path)
        frame_count = 0
        images = []
        totalNum = 0
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                frame_count += 1
                if frame_count % sample_rate == 0:
                    images.append(frame)
                    if len(images) < self.batch:
                        continue
                    # Full batch collected: preprocess and run inference.
                    images_input = self.fn_load_image(images)
                    predictions = self.model(images_input).astype(np.float32)
                    print(predictions)
                    totalNum += len(images)
                    images = []
        finally:
            cap.release()  # always free the capture handle (original leaked it)
        # NOTE(review): a trailing partial batch (len(images) < self.batch) is
        # dropped, matching the original behavior; pad and flush it if every
        # sampled frame must be scored.
        print("frame num:{}".format(totalNum))


if __name__ == "__main__":
    nsfw = NSFW()
    total_start_time = time.time()
    # predictVidio prints per-batch predictions and returns None, so there is
    # no result to capture (the original bound its None return to data_result).
    nsfw.predictVidio("./img/big-buck-bunny_trailer.mp4")
    # Total elapsed wall-clock time for decoding + inference.
    print(time.time()-total_start_time)
