import numpy as np
import random
import socket
from models import ctrbox_net
import torch
import cv2
import decoder
from datasets.DOTA_devkit.ResultMerge_multi_process import py_cpu_nms_poly_fast, py_cpu_nms_poly


# Index of the video-capture device passed to cv2.VideoCapture below.
CAPTURE_IN = 1
# When True, detections are streamed over TCP to a single client
# (e.g. a ROS bridge node).
ROS_SERVER_ON = True
if ROS_SERVER_ON:
    socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR lets the script restart quickly without
    # "address already in use" errors from a lingering socket.
    socket_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    socket_server.bind(('', 9095))
    socket_server.listen(5)
    print('Start listening on port 9095 ...')
    # Blocks until exactly one client connects; only that client is served
    # for the lifetime of the script.
    client_socket, client_address = socket_server.accept()
    print('Got my client')

# Example DOTA image crops for offline testing (not used by the camera loop
# below, which reads frames from cv2.VideoCapture instead).
INPUT_IMG_LIST = [
    '/home/jario/dataset/dota2.0_val_split_600x600_scale_1_05/images/P0003__1__547___423.png',
    '/home/jario/dataset/dota2.0_val_split_600x600_scale_1_05/images/P6687__1__4000___1000.png'
]
# Checkpoint file holding the trained CTRBOX weights.
resume = 'weights_dota/model_50.pth'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Number of object classes for each dataset this script supports.
num_classes = {'dota': 15, 'hrsc': 1}
selected_dataset = 'dota'
# Network input resolution (width, height); every frame is resized to this.
input_w, input_h = 800, 450
# DOTA category names; the list order must match the model's class indices.
category = [
    'plane',
    'baseball-diamond',
    'bridge',
    'ground-track-field',
    'small-vehicle',
    'large-vehicle',
    'ship',
    'tennis-court',
    'basketball-court',
    'storage-tank',
    'soccer-ball-field',
    'roundabout',
    'harbor',
    'swimming-pool',
    'helicopter'
]
# One colour triple per category, same order as `category`.
# NOTE(review): presumably BGR ordering since these go to cv2 draw calls --
# confirm before relying on the colour names.
category_colors = [
    [220, 20, 60],
    [119, 11, 32],
    [0, 0, 142],
    [0, 0, 230],
    [106, 0, 228],
    [0, 60, 100],
    [0, 80, 100],
    [0, 0, 70],
    [0, 0, 192],
    [250, 170, 30],
    [100, 170, 30],
    [220, 220, 0],
    [175, 116, 175],
    [250, 0, 30],
    [165, 42, 42]
]
# Re-key the colour list by category name for lookup during drawing.
category_colors = {cat: category_colors[i] for i, cat in enumerate(category)}
# Output heads of the network: class heatmap ('hm'), box-vector regression
# ('wh', 10 channels), centre offset ('reg'), orientation class ('cls_theta').
heads = {'hm': num_classes[selected_dataset],
         'wh': 10,
         'reg': 2,
         'cls_theta': 1
         }
# Stride between input pixels and output feature-map cells.
down_ratio = 4
model = ctrbox_net.CTRBOX(heads=heads,
                          pretrained=True,
                          down_ratio=down_ratio,
                          final_kernel=1,
                          head_conv=256)

# map_location keeps the checkpoint on CPU; the model moves to `device` below.
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch']))
state_dict_ = checkpoint['model_state_dict']
# strict=False tolerates missing/unexpected keys between checkpoint and model.
model.load_state_dict(state_dict_, strict=False)
model = model.to(device)
model.eval()

# NOTE(review): this rebinds the name `decoder`, shadowing the imported
# module of the same name -- it works only because the module itself is not
# referenced again after this line.
decoder = decoder.DecDecoder(K=500,
                             conf_thresh=0.1,
                             num_classes=num_classes[selected_dataset])


def processing_test(image, target_w=None, target_h=None):
    """Resize a frame and convert it to a normalized NCHW float tensor.

    Args:
        image: H x W x 3 uint8 image as produced by cv2.
        target_w: output width; defaults to the module-level ``input_w``.
        target_h: output height; defaults to the module-level ``input_h``.

    Returns:
        ``torch.FloatTensor`` of shape (1, 3, target_h, target_w) with values
        shifted to [-0.5, 0.5).
    """
    # Fall back to the script-wide network input size when not overridden,
    # so existing single-argument callers behave exactly as before.
    w = input_w if target_w is None else target_w
    h = input_h if target_h is None else target_h
    image = cv2.resize(image, (w, h))
    # Scale to [0, 1] then centre around zero (assumed to match the training
    # preprocessing -- TODO confirm against the training pipeline).
    out_image = image.astype(np.float32) / 255.
    out_image = out_image - 0.5
    # HWC -> CHW and add a leading batch dimension.
    out_image = out_image.transpose(2, 0, 1).reshape(1, 3, h, w)
    out_image = torch.from_numpy(out_image)
    return out_image


def decode_prediction(predictions, ori_image, down_ratio):
    """Turn raw decoder output into per-category corner polygons and scores.

    Args:
        predictions: batch of decoded rows; each row holds at least
            [cx, cy, tx, ty, rx, ry, bx, by, lx, ly, score, class].
        ori_image: original frame; only its height/width are read, to map
            feature-map coordinates back to image pixels.
        down_ratio: output stride of the network.

    Returns:
        Tuple ``(pts_by_cat, scores_by_cat)`` of dicts keyed by category
        name, holding lists of (4, 2) float32 corner arrays and the
        matching confidence scores.
    """
    rows = predictions[0, :, :]
    img_h, img_w = ori_image.shape[0], ori_image.shape[1]

    pts_by_cat = {name: [] for name in category}
    scores_by_cat = {name: [] for name in category}
    for row in rows:
        center = np.asarray(row[0:2], np.float32)
        top = np.asarray(row[2:4], np.float32)
        right = np.asarray(row[4:6], np.float32)
        bottom = np.asarray(row[6:8], np.float32)
        left = np.asarray(row[8:10], np.float32)
        # Reconstruct the four corners (tr, br, bl, tl) from the centre and
        # the four mid-edge points of the rotated box.
        corners = np.asarray([top + right - center,
                              bottom + right - center,
                              bottom + left - center,
                              top + left - center], np.float32)
        # Map feature-map coordinates back to original-image pixels.
        corners[:, 0] = corners[:, 0] * down_ratio / input_w * img_w
        corners[:, 1] = corners[:, 1] * down_ratio / input_h * img_h
        name = category[int(row[11])]
        pts_by_cat[name].append(corners)
        scores_by_cat[name].append(row[10])
    return pts_by_cat, scores_by_cat


def non_maximum_suppression(pts, scores):
    """Run rotated-polygon NMS over one category's candidate boxes.

    Args:
        pts: (N, 4, 2) float array of box corner points.
        scores: (N,) confidence scores aligned with ``pts``.

    Returns:
        (M, 9) float64 array of kept rows laid out as
        [x1, y1, x2, y2, x3, y3, x4, y4, score].
    """
    # Row-major flattening yields exactly the x1,y1,...,x4,y4 column order
    # the NMS kernel expects; the score goes in the last column.
    flat_corners = pts.reshape(pts.shape[0], 8)
    dets = np.hstack([flat_corners, scores[:, np.newaxis]]).astype(np.float64)
    keep = py_cpu_nms_poly_fast(dets=dets, thresh=0.1)
    return dets[keep]


def vis_object_segs(ori_image, results, cat):
    """Draw one category's post-NMS detections on the frame.

    Each box is drawn twice: a solid colour fill on an overlay copy and a
    magenta outline plus "<score> <category>" label on the original; the two
    are then alpha-blended 50/50.

    Args:
        ori_image: BGR frame to annotate (cv2 calls modify it in place).
        results: iterable of 9-element rows
            [x1, y1, x2, y2, x3, y3, x4, y4, score]
            as produced by non_maximum_suppression().
        cat: category name used for the fill colour and the label text.

    Returns:
        The blended, annotated image.
    """
    overlay = ori_image.copy()
    for pred in results:
        score = pred[-1]
        # The four corner points of the rotated box, in polygon order.
        box = np.asarray([[pred[0], pred[1]],
                          [pred[2], pred[3]],
                          [pred[4], pred[5]],
                          [pred[6], pred[7]]], np.float32)
        # np.intp is the exact type the removed np.int0 alias pointed to
        # (np.int0 is gone in NumPy 2.0).
        poly = [box.astype(np.intp)]

        overlay = cv2.fillPoly(overlay, poly, category_colors[cat], 1)
        ori_image = cv2.drawContours(ori_image, poly, -1, (255, 0, 255), 1, 1)
        cv2.putText(ori_image, '{:.2f} {}'.format(score, cat),
                    (int(box[1][0]), int(box[1][1])),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 255), 1, 1)

    # Blend the solid fills with the outlined original.
    ori_image = cv2.addWeighted(ori_image, 0.5, overlay, 0.5, 0)
    return ori_image


# Main loop: grab frames, run detection, draw results, and (optionally)
# stream every kept box to the connected TCP client.
cap = cv2.VideoCapture(CAPTURE_IN)

frame_cnt = 0
while cap.isOpened():
    ret, ori_image = cap.read()
    if not ret or ori_image is None:
        # Grab failed (camera unplugged / stream ended): stop cleanly
        # instead of crashing inside processing_test() on a None frame.
        break
    frame_cnt += 1
    # Per-category lists of post-NMS detections for this frame.
    results = {cat: [] for cat in category}

    img = processing_test(ori_image).to(device)

    with torch.no_grad():
        pr_decs = model(img)

    decoded_pts = []
    decoded_scores = []
    if device.type == 'cuda':
        # torch.cuda.synchronize raises when handed a CPU device, so only
        # synchronize when actually running on the GPU.
        torch.cuda.synchronize(device)
    predictions = decoder.ctdet_decode(pr_decs)
    pts0, scores0 = decode_prediction(predictions, ori_image, down_ratio)
    decoded_pts.append(pts0)
    decoded_scores.append(scores0)

    # Rotated-polygon NMS per category, then draw the surviving boxes.
    n_boxes = 0
    for cat in category:
        if cat == 'background':
            continue
        pts_cat = []
        scores_cat = []
        for pts0, scores0 in zip(decoded_pts, decoded_scores):
            pts_cat.extend(pts0[cat])
            scores_cat.extend(scores0[cat])
        pts_cat = np.asarray(pts_cat, np.float32)
        scores_cat = np.asarray(scores_cat, np.float32)
        if pts_cat.shape[0]:
            nms_results = non_maximum_suppression(pts_cat, scores_cat)
            n_boxes += len(nms_results)
            results[cat].extend(nms_results)
            ori_image = vis_object_segs(ori_image, nms_results, cat)

    # Stream one fixed-width CSV record per kept box:
    # frame id, total box count, box index, 1-based class id,
    # eight corner coordinates normalized to [0, 1], confidence,
    # centre pixel (x, y), trailing 0 = detecting mode.
    j = 0
    for i, cat in enumerate(category):
        for b in results[cat]:
            if ROS_SERVER_ON:
                tl = np.asarray([b[0], b[1]], np.float32)
                tr = np.asarray([b[2], b[3]], np.float32)
                br = np.asarray([b[4], b[5]], np.float32)
                bl = np.asarray([b[6], b[7]], np.float32)
                box = np.asarray([tl, tr, br, bl], np.float32)
                cen_pts = np.mean(box, axis=0)
                bb = [0, 0, 0, 0, 0, 0, 0, 0]
                for k in range(8):
                    # Even indices are x coordinates (normalize by width),
                    # odd ones are y (normalize by height).
                    if k in [0, 2, 4, 6]:
                        bb[k] = b[k] / ori_image.shape[1]
                    else:
                        bb[k] = b[k] / ori_image.shape[0]
                    # Clamp to [0, 1] for boxes clipped at the image border.
                    bb[k] = min(max(bb[k], 0.0), 1.0)

                # sendall() retries on short writes; a bare send() could
                # silently truncate the record on a congested socket.
                client_socket.sendall(
                    '{:08d},{:03d},{:03d},{:03d},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:.3f},{:04d},{:04d},0'.format(
                        frame_cnt,
                        n_boxes,
                        j,
                        int(i + 1),
                        bb[0],
                        bb[1],
                        bb[2],
                        bb[3],
                        bb[4],
                        bb[5],
                        bb[6],
                        bb[7],
                        b[-1],
                        int(cen_pts[0]),
                        int(cen_pts[1])).encode('utf-8'))
                j += 1

    cv2.imshow('img', ori_image)
    cv2.waitKey(10)

# Release the camera once the capture loop exits.
cap.release()

