from concurrent import futures
import logging

import grpc
import numpy as np
import NVA_rpc_pb2
import NVA_rpc_pb2_grpc
import cv2
import argparse
import os
import sys
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import detect as det
import time

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
import laneseg


# model = DetectMultiBackend(ROOT / 'yolov5s.pt', device=device, dnn=False)
# model = laneseg.yolop(confThreshold=0.25, nmsThreshold=0.45, objThreshold=0.5)


# --- Module-level inference state (mutated by Greeter.RPC_Select, read by RPC_Infer) ---
request_model = 0  # 0 = passthrough (no inference), 1 = YOLO vehicle detector, 2 = lane segmentation
model = None  # currently loaded model instance; stays None until a client calls RPC_Select
model_list = ["Yolo_Vehicle", "LaneSeg"]  # model names accepted in the RPC_Select request
device = select_device('0')  # hard-coded to CUDA device 0 — NOTE(review): confirm a GPU is present


def yolo_infer(img, model):
    """Run YOLOv5 detection on a BGR image and draw the results onto it.

    Args:
        img: HxWx3 uint8 image in OpenCV BGR order; annotated in place.
        model: loaded YOLOv5 model (e.g. DetectMultiBackend) whose raw output
            is suitable for ``non_max_suppression``.

    Returns:
        The same image array with one rectangle and label per detection.
    """
    # Preprocess: HWC -> CHW, float [0, 1], add batch dimension.
    im = np.transpose(img, (2, 0, 1))
    # Use the module-level `device` instead of hard-coding .cuda(), so the
    # tensor lands on the same device the model was loaded onto.
    inputs = torch.from_numpy(im).float().to(device) / 255
    inputs = inputs.unsqueeze(0)

    # Inference + NMS; each surviving row is (x1, y1, x2, y2, conf, cls).
    prediction = model(inputs)
    prediction = non_max_suppression(prediction, conf_thres=0.15, iou_thres=0.15, max_det=1000)
    pred = prediction[0]

    # Line width / font thickness scaled to image size (YOLOv5 convention).
    lw = max(round(sum(img.shape) / 2 * 0.003), 2)
    tf = max(lw - 1, 1)

    # Draw each detection on the image.
    for row in pred:
        conf, cls = float(row[4]), int(row[5])
        # BUG FIX: the original formatted the confidence (column 4) with :.0f
        # and the class (column 5) with :.2f — the columns were swapped.
        # The label is "<class-id>: <confidence>".
        label = f'{cls:.0f}: {conf:.2f}'
        p1 = (int(row[0]), int(row[1]))
        p2 = (int(row[2]), int(row[3]))
        cv2.rectangle(img, p1, p2, det.colors(cls, True))
        tw, th = cv2.getTextSize(label, 0, fontScale=lw / 6, thickness=tf)[0]
        # BUG FIX: `outside` was computed but never used, so the label always
        # overlapped the box's top-left corner. Draw it above the box when it
        # fits, otherwise just inside the top edge.
        outside = p1[1] - th - 3 >= 0
        org = (p1[0], p1[1] - 2) if outside else (p1[0], p1[1] + th + 2)
        cv2.putText(img, label, org, 0, lw / 6, det.colors(cls))
    return img


def langseg_infer(img, model):
    """Run lane-segmentation inference on *img*.

    Delegates entirely to the model's own ``detect`` method and returns its
    result unchanged.
    """
    return model.detect(img)


class Greeter(NVA_rpc_pb2_grpc.GreeterServicer):
    """gRPC service: decodes images, runs the selected model, returns JPEG bytes.

    NOTE(review): ``request_model`` and ``model`` are module-level globals
    written by RPC_Select and read by RPC_Infer from thread-pool workers with
    no synchronization — confirm single-client usage or guard with a lock.
    """

    def RPC_Infer(self, request, context):
        """Decode request.image, run the currently selected model, re-encode as JPEG."""
        # Decode the incoming compressed byte stream into a BGR image.
        buf = np.frombuffer(request.image, dtype=np.uint8)
        frame = cv2.imdecode(buf, cv2.IMREAD_COLOR)

        if request_model == 1:
            frame = yolo_infer(frame, model)
        elif request_model == 2:
            frame = langseg_infer(frame, model)
        # request_model == 0: pass the frame through untouched.

        # Re-encode as JPEG (quality 95) and ship the raw bytes back.
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, 95]
        payload = cv2.imencode('.jpg', frame, encode_params)[1].tobytes()
        return NVA_rpc_pb2.Infer_Reply(images=payload)

    def RPC_Select(self, request, context):
        """Switch the active model per request.select; reply with a success flag."""
        global request_model
        global model
        choice = request.select
        if choice == model_list[0]:
            # Lazy-load the YOLOv5 vehicle detector on first selection.
            request_model = 1
            model = DetectMultiBackend(ROOT / 'yolov5s.pt', device=device, dnn=False)
            return NVA_rpc_pb2.Select_Reply(select=True)
        if choice == model_list[1]:
            # Lazy-load the YOLOP lane-segmentation model.
            request_model = 2
            model = laneseg.yolop(confThreshold=0.25, nmsThreshold=0.45, objThreshold=0.5)
            return NVA_rpc_pb2.Select_Reply(select=True)
        # Unknown name: disable inference and report failure.
        request_model = 0
        return NVA_rpc_pb2.Select_Reply(select=False)


def serve():
    """Start the gRPC inference server on port 50051 and block until shutdown."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    NVA_rpc_pb2_grpc.add_GreeterServicer_to_server(Greeter(), grpc_server)
    # Plaintext (insecure) transport on all interfaces, IPv4 and IPv6.
    grpc_server.add_insecure_port('[::]:50051')
    grpc_server.start()
    grpc_server.wait_for_termination()


if __name__ == '__main__':
    # Install default logging handlers before the server starts emitting logs.
    logging.basicConfig()
    print('start...')
    # Blocks forever serving RPCs on port 50051.
    serve()


