#!/usr/bin/env python
# coding: utf-8

# In[1]:


import sys
import torch



# In[4]:


# export ONNX model for onnxruntime
# get_ipython().system('python export.py --weights ./yolov7-tiny.pt --grid --end2end --simplify      --topk-all 100 --iou-thres 0.65 --conf-thres 0.35      --img-size 640 640      --dynamic-batch      --max-wh 7680')



import cv2
import time
import random
import numpy as np
import onnxruntime as ort
from PIL import Image
from pathlib import Path
from collections import OrderedDict,namedtuple


# In[6]:


# Runtime configuration: request CUDA and point at the exported ONNX weights.
cuda = True
w = "yolov7-tiny.onnx"

# Sample images, loaded as BGR arrays (cv2.imread convention).
_sample_paths = [
    'inference/images/horses.jpg',
    'inference/images/bus.jpg',
    'inference/images/zidane.jpg',
    'inference/images/image1.jpg',
    'inference/images/image2.jpg',
    'inference/images/image3.jpg',
]
imgList = [cv2.imread(p) for p in _sample_paths]
# Tile the 6 samples and trim to a fixed 32-image batch.
imgList = (imgList * 6)[:32]


# In[7]:


# Prefer the CUDA execution provider when requested; ONNX Runtime falls
# back through the list in order, so CPU remains the safety net.
if cuda:
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
else:
    providers = ['CPUExecutionProvider']
session = ort.InferenceSession(w, providers=providers)


# In[8]:


# The 80 COCO class names, indexed by the class id the model emits.
names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush']
# One random BGR color per class name for box drawing.
# (Was `for i, name in enumerate(names)` — the index was never used.)
colors = {name: [random.randint(0, 255) for _ in range(3)] for name in names}


# In[9]:


def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    """Resize `im` preserving aspect ratio and pad it with `color` to `new_shape`.

    Returns ``(padded_image, ratio, (dw, dh))`` where `ratio` is the applied
    scale factor and `dw`/`dh` are the per-side paddings in pixels.
    When `auto` is True, pads only up to the next `stride` multiple instead
    of the full target shape.
    """
    src_h, src_w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale factor new/old; never upscale when scaleup is disabled
    # (upscaling hurts validation mAP).
    ratio = min(new_shape[0] / src_h, new_shape[1] / src_w)
    if not scaleup:
        ratio = min(ratio, 1.0)

    # Size after resizing, before padding, and the total padding needed.
    unpad_w = int(round(src_w * ratio))
    unpad_h = int(round(src_h * ratio))
    dw = new_shape[1] - unpad_w
    dh = new_shape[0] - unpad_h

    if auto:  # minimum rectangle: pad only to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)

    # Split the padding evenly between the two sides.
    dw /= 2
    dh /= 2

    if (src_w, src_h) != (unpad_w, unpad_h):
        im = cv2.resize(im, (unpad_w, unpad_h), interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
    return im, ratio, (dw, dh)


# Preprocess every image: BGR->RGB, letterbox to the model input size,
# HWC->CHW, prepend a batch axis, and cast to float32.
origin_RGB = []
resize_data = []
for bgr in imgList:
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    origin_RGB.append(rgb)
    padded, ratio, dwdh = letterbox(rgb.copy(), auto=False)
    chw = padded.transpose((2, 0, 1))
    batched = np.expand_dims(chw, 0)
    tensor = np.ascontiguousarray(batched).astype(np.float32)
    resize_data.append((tensor, ratio, dwdh))

# Stack all per-image tensors into one contiguous input batch.
np_batch = np.concatenate([tensor for tensor, _, _ in resize_data])



# In[12]:


# Tensor names the exported graph exposes; session.run needs the output
# names, and the feed dict is keyed by the input name.
outname = [o.name for o in session.get_outputs()]
outname

inname = [inp.name for inp in session.get_inputs()]
inname


# In[14]:


# Exercise the dynamic-batch axis at several sizes (1, 4, 6), normalizing
# pixel values to [0, 1] before each run.
for batch_size in (1, 4, 6):
    im = np.ascontiguousarray(np_batch[0:batch_size, ...] / 255)
    out = session.run(outname, {'images': im})
    out

# Full 32-image batch; keep only the first output (the detections tensor).
im = np.ascontiguousarray(np_batch / 255)
out = session.run(outname, {'images': im})[0]


# In[18]:


# Draw each detection back onto its original RGB frame.  Every output row
# is (batch_id, x0, y0, x1, y1, cls_id, score) in letterboxed coordinates.
# (Was `for i, (...) in enumerate(out)` — the index `i` was never used.)
for batch_id, x0, y0, x1, y1, cls_id, score in out:
    if batch_id >= 6:
        # Only the first six images are displayed below.
        # NOTE(review): assumes rows are ordered by batch_id — appears to
        # hold for this end2end NMS export; confirm before relying on it.
        break
    image = origin_RGB[int(batch_id)]
    ratio, dwdh = resize_data[int(batch_id)][1:]
    # Undo the letterbox transform: subtract the padding from both corners,
    # then divide by the scale ratio to get original-image coordinates.
    box = np.array([x0, y0, x1, y1])
    box -= np.array(dwdh * 2)  # (dw, dh) repeated for both corners
    box /= ratio
    box = box.round().astype(np.int32).tolist()
    cls_id = int(cls_id)
    score = round(float(score), 3)
    name = names[cls_id]
    color = colors[name]
    name += ' ' + str(score)
    cv2.rectangle(image, box[:2], box[2:], color, 2)
    cv2.putText(image, name, (box[0], box[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.75, [225, 255, 255], thickness=2)


# In[19]:


# Wrap the first six annotated frames as PIL images.  These were notebook
# display cells; run as a plain script the images are built and discarded.
for frame_idx in range(6):
    Image.fromarray(origin_RGB[frame_idx])


# In[ ]:




