import time
import os
from GAN_RS.options import TestOptions
from GAN_RS.models import create_model
from GAN_RS.util import util
from SSD.data import base_transform, mb_cfg, UW_CLASSES
from SSD.layers.functions import Detect,PriorBox
import torch
import torch.backends.cudnn as cudnn
import cv2
import numpy as np
from threading import Thread

# Shared state between CamThread (capture/display/UI) and ResDet (detector).
frame_pool = []            # queue of (display_frame, input_tensor) pairs
image_det_display = []     # [frame] or [frame, detections] handed back for drawing
frame_wh = ()              # capture (width, height), set by CamThread.__init__
frame_num = 0              # frames consumed by the detector so far
Quit = False               # set on ESC; both thread loops exit when True
reset_count = False        # declared alongside the other flags; not read in this file
reset_id = False           # set on 'r'; tells the detector to restart tube identities
write_image = False        # set on 'k'; snapshot the next annotated frame to disk

# Parse test-time options; all GAN-RS and SSD settings below come from `opt`.
opt = TestOptions().parse()
opt.nThreads = 2   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
# BGR colors keyed by 1-based detection class index (0 is background).
color = {1: (255, 0, 0), 2:(0, 255, 255), 3:(0, 0, 255), 4:(128, 0, 128)}
# A one-character dataroot is treated as a webcam index for cv2.VideoCapture.
# NOTE(review): raises ValueError if the single character is not a digit.
if len(opt.dataroot) == 1:
    opt.dataroot = int(opt.dataroot)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cudnn.benchmark = True  # input size is fixed, so let cuDNN autotune kernels
############### GAN-RS ################
# Build the GAN-RS restoration model only when a named checkpoint is configured.
if opt.name != 'default':
    model = create_model(opt)
    model.setup(opt)
################ SSD ##################
# Build the SSD/RefineDet detector only when a trained model path is given.
if opt.model_dir:
    num_classes = len(UW_CLASSES) + 1  # +1 for the background class
    prior = 'VOC_'+ str(opt.ssd_dim)
    if 'RefineDet' in opt.backbone and opt.ssd_dim == 512:
        prior += '_RefineDet'
    cfg = mb_cfg[prior]
    priorbox = PriorBox(cfg)
    with torch.no_grad():
        priors = priorbox.forward().to(device)
    mean = (128, 128, 128)  # per-channel pixel mean subtracted before detection
    # (A redundant nested `if opt.model_dir:` was removed -- it is already
    # guaranteed true by the enclosing branch.)
    # `return_feature` is only needed when tube association is on (opt.tub > 0).
    if opt.deform:
        from SSD.model.dualrefinedet_vggbn import build_net
        net = build_net('test', size=opt.ssd_dim, num_classes=num_classes, return_feature=opt.tub > 0,
                        c7_channel=1024, def_groups=opt.deform, multihead=opt.multihead, bn=opt.bn)
    else:
        from SSD.model.refinedet_vgg import build_net
        net = build_net('test', size=opt.ssd_dim, num_classes=num_classes, use_refine=opt.refine,
                        c7_channel=1024, bn=opt.bn, multihead=opt.multihead, return_feature=opt.tub > 0)
    print('loading model!')
    net.load_state_dict(torch.load(opt.model_dir))
    net.eval()
    net = net.to(device)
    print('Finished loading model!', opt.model_dir)
    detector = Detect(num_classes, 0, opt.top_k, opt.confidence_threshold, opt.nms_threshold, tub=opt.tub, tub_thresh=opt.tub_thresh,
                      tub_generate_score=opt.tub_generate_score, loss_hold_len=opt.loss_hold_len)
# Mutable runtime thresholds, adjusted from the keyboard in CamThread.run.
conf_list = opt.conf_list
tub_thresh = opt.tub_thresh

########################## Camra ##############################
class CamThread(Thread):
    """Capture / display / UI thread.

    Reads frames from `dataroot` via cv2.VideoCapture, queues normalized
    tensors into the shared `frame_pool` for the detector thread, draws and
    records the detections published back through `image_det_display`, and
    maps keyboard input onto the shared runtime thresholds
    (`conf_list`, `tub_thresh`) and control flags (`Quit`, `reset_id`,
    `write_image`).
    """

    def __init__(self, dataroot, fineSize=512, results_dir=None, writename=None, ):
        """
        dataroot    -- video path / URL, or an int webcam index.
        fineSize    -- square side length the GAN-RS input is resized to.
        results_dir -- if set, detection text logs (and optionally video) go here.
        writename   -- if set, annotated video is written under this file name.
        """
        super(CamThread, self).__init__()
        global frame_wh
        self.cap = cv2.VideoCapture(dataroot)
        assert self.cap.isOpened()
        self.fineSize = fineSize
        self.results_dir = results_dir
        self.writename = writename
        frame_wh = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        if self.results_dir:
            if not os.path.exists(self.results_dir):
                os.mkdir(self.results_dir)
            if self.writename:
                txt_name = self.writename.split('.')[0]
                fps = self.cap.get(cv2.CAP_PROP_FPS)
                fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
                self.out = cv2.VideoWriter(os.path.join(self.results_dir, self.writename), fourcc, fps, frame_wh)
            else:
                # BUGFIX: was 'results.txt', which became 'results.txt.txt'
                # once '.txt' was appended below.
                txt_name = 'results'
            self.results_file = open(os.path.join(self.results_dir, txt_name + '.txt'), 'w')
            self.count_file = open(os.path.join(self.results_dir, txt_name + '_count.txt'), 'w')

    def run(self):
        global image_det_display, frame_pool, frame_num, Quit, conf_list, tub_thresh, reset_id, write_image
        # Per-class running maximum tube identity (== objects counted so far).
        max_id = [torch.tensor(0.), ] * len(UW_CLASSES)
        while True:
            time_s = time.time()
            log_str = 'CT: '
            if len(image_det_display):
                # The detector published a frame (and maybe detections): draw/record it.
                log_str += ' resive a image, '
                image_ori = image_det_display[0].copy()
                if len(image_det_display) == 2:
                    # BUGFIX: `async` is a reserved keyword since Python 3.7;
                    # the torch.Tensor.copy_ argument is named `non_blocking`.
                    detections = torch.FloatTensor(image_det_display[1].size()).copy_(image_det_display[1], non_blocking=True)
                    for j in range(1, detections.size(1)):  # class 0 is background
                        dets = detections[0, j, :]
                        if dets.sum() == 0:
                            continue
                        # Keep only rows with a positive score.
                        mask = dets[:, 0].gt(0.).expand(dets.size(-1), dets.size(0)).t()
                        dets = torch.masked_select(dets, mask).view(-1, dets.size(-1))
                        # Row layout appears to be [score, x1, y1, x2, y2(, tube_id)];
                        # the id column exists only in the 6-wide case -- TODO confirm in Detect.
                        boxes = dets[:, 1:-1] if dets.size(-1) == 6 else dets[:, 1:]
                        identity = dets[:, -1] if dets.size(-1) == 6 else torch.zeros((dets.size(0), 1)).fill_(-1)
                        if torch.max(identity) + 1 > max_id[j - 1]:
                            max_id[j - 1] = torch.max(identity) + 1
                        # Scale normalized coordinates to pixel space.
                        boxes[:, 0] *= frame_wh[0]
                        boxes[:, 2] *= frame_wh[0]
                        boxes[:, 1] *= frame_wh[1]
                        boxes[:, 3] *= frame_wh[1]
                        scores = dets[:, 0]
                        for b, s, d in zip(boxes, scores, identity):
                            w = b[2] - b[0]
                            h = b[3] - b[1]
                            # Discard implausibly large boxes (> 10% of the frame area).
                            if (w * h) / (frame_wh[0] * frame_wh[1]) > 0.1:
                                continue
                            if self.results_dir:
                                # The split('(')[-1][:-1] dance strips the "tensor(...)"
                                # wrapper from the scalar's repr.
                                self.results_file.write(str(frame_num) + ' ' + str(j) + ' ' + str(s).split('(')[-1][:-1] + ' ' + str(b[0]).split('(')[-1][:-1] + ' ' + str(
                                    b[1]).split('(')[-1][:-1] + ' ' + str(b[2]).split('(')[-1][:-1] + ' ' + str(b[3]).split('(')[-1][:-1] + ' ' + str(int(d)) + '\n')
                            if self.writename or opt.show_video:
                                cv2.rectangle(image_ori, (int(b[0]), int(b[1])), (int(b[2]), int(b[3])), color[j], thickness=1)
                                put_str = str(int(d)) + ':' + str(s).split('(')[-1][:-1]
                                cv2.putText(image_ori, put_str,
                                            (int(b[0]) + 10, int(b[1]) - 10), cv2.FONT_HERSHEY_DUPLEX, 0.5, color=color[j],
                                            thickness=1)
                if self.writename or opt.show_video:
                    # Per-class cumulative counts in the top-left corner.
                    for i, total in enumerate(max_id):
                        cv2.putText(image_ori, UW_CLASSES[i] + ':' + str(total).split('(')[-1][:-2],
                                    (10, 20 * i + 20), cv2.FONT_HERSHEY_DUPLEX, 0.5, color=color[i + 1],
                                    thickness=1)
                    if opt.show_video:
                        cv2.imshow('test', image_ori)
                if self.results_dir:
                    if self.writename:
                        self.out.write(image_ori.copy())
                    if write_image:
                        # Snapshot requested via 'k': dump the frame plus current counts.
                        cv2.imwrite(os.path.join(self.results_dir, str(frame_num) + '.jpg'), image_ori)
                        count_str = str(frame_num)
                        for cl, cl_num in zip(UW_CLASSES, max_id):
                            count_str += ', ' + cl + ', ' + str(cl_num).split('(')[-1][:-2]
                        self.count_file.write(count_str + '\n')
                        write_image = False
                image_det_display = list()
            else:
                # Nothing to display: grab the next frame and queue it for the detector.
                ret, frame = self.cap.read()
                if ret:
                    frame_norm = cv2.normalize(cv2.resize(frame, (self.fineSize, self.fineSize)), None, alpha=-1,
                                               beta=1,
                                               norm_type=cv2.NORM_MINMAX,
                                               dtype=cv2.CV_32FC3)
                    # Queue (display frame, BGR->RGB NCHW tensor scaled to [-1, 1]).
                    frame_pool.append((cv2.resize(frame, frame_wh),
                                       torch.unsqueeze(torch.from_numpy(frame_norm[:, :, (2, 1, 0)]), dim=0).permute(
                                           0, 3, 1, 2).to(device)))
                    if len(frame_pool) > opt.pool_size:
                        # Drop the oldest frames so the detector never lags far behind.
                        frame_pool = frame_pool[-opt.pool_size:]
                    log_str += "pool length: " + str(len(frame_pool))
                else:
                    log_str += "fail to read"

            time_e = time.time()
            log_str += ', ' + str(np.around(time_e - time_s, 4))
            # Keyboard control (an OpenCV window must have focus to receive keys).
            key = cv2.waitKey(1)
            if key == 32:  # space: pause until space is pressed again
                while True:
                    key_c = cv2.waitKey(1)
                    if key_c == 32:
                        break
            elif key == 27:  # ESC: shut down both threads and close outputs
                Quit = True
                if self.results_dir:
                    self.results_file.close()
                    self.count_file.close()
                    if self.writename:
                        self.out.release()
                print('CamThread Quit')
                break
            # 1/2, q/w, a/s, z/x adjust the per-class confidence thresholds;
            # ,/. adjust the tube-association threshold.
            elif key == 50:  # 2
                conf_list[0] = np.around(np.clip(conf_list[0] + 0.1, 0., 1.), 1)
            elif key == 49:  # 1
                conf_list[0] = np.around(np.clip(conf_list[0] - 0.1, 0., 1.), 1)
            elif key in [87, 119]:  # w
                conf_list[1] = np.around(np.clip(conf_list[1] + 0.1, 0., 1.), 1)
            elif key in [81, 113]:  # q
                conf_list[1] = np.around(np.clip(conf_list[1] - 0.1, 0., 1.), 1)
            elif key in [83, 115]:  # s
                conf_list[2] = np.around(np.clip(conf_list[2] + 0.1, 0., 1.), 1)
            elif key in [65, 97]:  # a
                conf_list[2] = np.around(np.clip(conf_list[2] - 0.1, 0., 1.), 1)
            elif key in [88, 120]:  # x
                conf_list[3] = np.around(np.clip(conf_list[3] + 0.1, 0., 1.), 1)
            elif key in [90, 122]:  # z
                conf_list[3] = np.around(np.clip(conf_list[3] - 0.1, 0., 1.), 1)
            elif key == 46:  # .
                tub_thresh = np.around(np.clip(tub_thresh + 0.1, 0., 1.5), 1)
            elif key == 44:  # ,
                tub_thresh = np.around(np.clip(tub_thresh - 0.1, 0., 1.5), 1)
            elif key in [82, 114]:  # r: reset counters and tube identities
                max_id = [torch.tensor(0.), ] * len(UW_CLASSES)
                reset_id = True
            elif key in [75, 107]:  # k: snapshot the current frame
                write_image = True
            log_str += ', conf_list=' + str(conf_list) + ', tub_thresh=' + str(tub_thresh)
            print(log_str)

def ResDet(self):
    """Detection worker loop: pops frames from `frame_pool`, optionally
    restores them with GAN-RS, runs the SSD detector, and publishes the
    result through `image_det_display` for CamThread to draw.

    `self` is an unused placeholder argument (the thread is started with
    args=(5,)); it is kept so existing callers keep working. Exits when the
    shared `Quit` flag is set by CamThread.
    """
    global frame_pool, image_det_display, frame_num, reset_id
    while True:
        if Quit:
            print('ResDet Quit')
            break
        log_str = ''
        if len(frame_pool) < opt.pool_size:
            log_str += ' trans_frame_pool length < ' + str(opt.pool_size)
            # BUGFIX: yield the CPU briefly instead of busy-spinning while
            # the capture thread fills the pool.
            time.sleep(0.01)
            continue
        # Consume the oldest queued frame (FIFO).
        frame_num += 1
        image_ori, data_tensor = frame_pool[0]
        frame_pool = frame_pool[1:]
        ######################## GAN-RS ######################
        time1 = time.time()
        if opt.name != 'default':
            data = {'A_paths': [], 'A': data_tensor}
            model.set_input(data)
            model.test()
            visuals = model.get_current_visuals()
            time2 = time.time()
            log_str += 'GAN_RS: ' + str(np.around(time2 - time1, 4))
        ######################### SSD #########################
        if opt.model_dir:
            time3 = time.time()
            if opt.name != 'default':
                # Detect on the restored image when GAN-RS is enabled.
                det_tensor = util.tensor2dettensor(visuals['fake_B'], mean, opt.ssd_dim, opt.fineSize)
            else:
                # Otherwise transform the raw frame (BGR -> RGB, mean-subtracted).
                det_tensor = base_transform(image_ori, opt.ssd_dim, mean)[:, :, (2, 1, 0)]
                det_tensor = torch.from_numpy(det_tensor).unsqueeze(0).permute(0, 3, 1, 2).to(device)
            with torch.no_grad():
                arm_loc, ota_feature, loc, conf = net(det_tensor)
                detections = detector.forward(loc, conf, priors, arm_loc_data=arm_loc, feature=ota_feature, conf_list=conf_list, tub_thresh=tub_thresh, reset_id=reset_id)
            reset_id = False
            # Hand the (frame, detections) pair to the display thread.
            image_det_display = [image_ori, detections]
            time4 = time.time()
            log_str += ' SSD: ' + str(np.around(time4 - time3, 4))
        print(log_str)
        time.sleep(0.2)

if __name__ == '__main__':
    # Launch the capture/display thread and the detection worker, then block
    # until both finish (ESC in the video window sets Quit, ending each loop).
    cam_thread = CamThread(opt.dataroot, opt.fineSize, opt.results_dir, opt.writename)
    det_thread = Thread(target=ResDet, args=(5,))
    cam_thread.start()
    det_thread.start()
    cam_thread.join()
    det_thread.join()
