"""
Usage -:
    $ python test.py 
"""
import argparse
import os
import platform
import sys
from pathlib import Path
import torch
import numpy as np

from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh, letterbox)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode

import asyncio
import websockets

import requests  
import json
import io  

print("start")
# Set Param
weights='sim.pt'
visualize=False
hide_labels=False
hide_conf=False
save_dir='output'

# Load Model
#@smart_inference_mode()
device = select_device('')
model = DetectMultiBackend(weights, device='cpu', dnn=False, data='sim.yaml', fp16=False)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size((640, 640), s=stride)  # check image size 
count = 0

async def echo(websocket, path):
    """Handle one websocket connection.

    For each incoming JSON message, fetch the referenced image over HTTP,
    run YOLO inference on it, save an annotated copy to ``<count>.png``, and
    reply with the detections as JSON.

    Expected message schema (inferred from usage — confirm with the client):
        {"Mission": ..., "id": ..., "FileName": ...}

    Reply schema:
        {"id": ..., "res": [{"id": class_id, "xywh": [...]}, ...], "url": "<count>.png"}
    """
    global count
    async for message in websocket:
        count += 1
        print("I got your message: {}".format(message))
        data = json.loads(message)
        image_url = "http://192.168.22.199:9000/" + data['Mission'] + "/" + data['id'] + "/" + data['FileName']

        # NOTE(review): requests.get blocks the event loop; acceptable for a
        # single client, but consider run_in_executor/aiohttp for concurrency.
        # Added timeout + raise_for_status so HTTP failures surface here
        # instead of as an opaque imdecode crash below.
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()

        # Decode bytes -> BGR image, then letterbox-resize to the model input.
        im0 = cv2.imdecode(np.frombuffer(response.content, dtype=np.uint8), cv2.IMREAD_COLOR)
        if im0 is None:
            raise ValueError("could not decode image fetched from " + image_url)
        im = letterbox(im0, (640, 640), stride=stride, auto=True)[0]  # padded resize
        im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        im = np.ascontiguousarray(im)  # contiguous

        # Warm up only for the first message, not on every request.
        if count == 1:
            model.warmup(imgsz=(1, 3, *imgsz))

        im = torch.from_numpy(im).to(model.device)
        im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

        # Inference + NMS (conf 0.25, IoU 0.45, all classes, max 100 dets).
        # The original's feature-visualization branch was dead code
        # (visualize was unconditionally reset to False), so pass False.
        pred = model(im, augment=False, visualize=False)
        pred = non_max_suppression(pred, 0.25, 0.45, None, False, max_det=100)

        bbox_list = []
        for det in pred:  # per image
            annotator = Annotator(im0, line_width=3, example=str(names))
            if len(det):
                # Rescale boxes from inference size back to original image size.
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)  # integer class
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    bbox_list.append({'id': c, 'xywh': xywh})
                    print("class id:")
                    print(c)
                    print("xywh:")
                    print(xywh)
                    label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                    annotator.box_label(xyxy, label, color=colors(c, True))
        cv2.imwrite(str(count) + '.png', im0)

        res_dict = {
            "id": data['id'],
            "res": bbox_list,
            "url": str(count) + '.png',
        }
        await websocket.send(json.dumps(res_dict))

async def _serve_forever():
    """Serve the echo handler on all interfaces, port 8765, until cancelled."""
    async with websockets.serve(echo, '0.0.0.0', 8765):
        await asyncio.Future()  # never completes: keep the server running


# asyncio.get_event_loop() is deprecated for this pattern since Python 3.10;
# asyncio.run() creates, runs, and tears down the event loop properly.
asyncio.run(_serve_forever())