import argparse
import sys
import cv2
import mmcv
import numpy as np
import torch


from mmcv.runner import load_checkpoint
from actionclas.models import build_detector
from collections import deque
from threading import Thread
from PaddleInfer import Detector,predict_image
import tracker
'''
Font/rendering settings for the on-screen overlay text (OpenCV drawing).
'''

FONTFACE = cv2.FONT_HERSHEY_DUPLEX
FONTSCALE = 0.5
FONTCOLOR = (255, 255, 255)  # BGR, white
MSGCOLOR = (128, 128, 128)  # BGR, gray
THICKNESS = 1
LINETYPE = 1



def parse_args(argv=None):
    """Parse command-line arguments for the action-detection demo.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` (same behavior as
            before); passing an explicit list makes the function testable
            and reusable from other code.

    Returns:
        argparse.Namespace: The parsed options.
    """
    parser = argparse.ArgumentParser(description='Action Detect demo')
    parser.add_argument(
        '--video',
        default='0',
        help='input video path or camera id')
    parser.add_argument(
        '--device',
        default='cuda:0',
        help='Device used for inference')
    parser.add_argument(
        '--det_model_dir',
        type=str, default='Model/outputPP',
        help="Directory :'__model__', '__params__', 'infer_cfg.yml', create by export_model.py")
    parser.add_argument(
        '--det_label_file',
        type=str,
        default='Model/outputPP/label_list.txt',
        help="Path of label file.")
    parser.add_argument(
        '--det-score-thr',
        type=float,
        default=0.35,
        help='the threshold of human detection score')
    parser.add_argument(
        '--label-map', default='Model/outputAVA/label_map_ava.txt',
        help='label map file')
    parser.add_argument(
        '--act_config',
        default='Model/outputAVA/ava/slowfast.py',
        help='spatio temporal detection config file path')
    parser.add_argument(
        '--act_checkpoint',
        default='Model/outputAVA/slowfast.pth',
        help='spatio temporal detection checkpoint file/url')
    parser.add_argument(
        '--predict-stepsize',
        default=2,
        type=int,
        help='give out a prediction per n frames')
    return parser.parse_args(argv)
def _draw_action_results(im, end_res, tracklist):
    """Overlay action labels/scores onto tracked person boxes on *im*.

    Args:
        im: BGR image to draw on (modified in place).
        end_res: list of (box, labels, scores) action results; box[4] holds
            the track id the result was produced for.
        tracklist: current tracker output; each entry is indexed as
            (x1, y1, x2, y2, _, track_id) — assumed from usage, TODO confirm
            against tracker.update().
    """
    for ann in end_res:
        box = ann[0]
        for t_list in tracklist:
            if box[4] == t_list[5]:  # match the action result's id to the current track id
                label = ann[1]
                if not len(label):
                    continue
                score = ann[2]
                st = (int(t_list[0]), int(t_list[1]))
                ed = (int(t_list[2]), int(t_list[3]))
                cv2.rectangle(im, st, ed, color=(255, 0, 255), thickness=2)
                for k, lb in enumerate(label):
                    if k >= 5:  # display at most 5 action labels per person
                        break
                    text = abbrev(lb)
                    if text == "sit" or text == "stand":
                        continue  # these two labels are deliberately not rendered
                    text = ': '.join([text, str(score[k])])
                    location = (st[0], 18 + k * 18 + st[1])
                    textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
                                               THICKNESS)[0]
                    textwidth = textsize[0]
                    # Filled red background box behind the label text.
                    diag0 = (location[0] + textwidth, location[1] - 14)
                    diag1 = (location[0], location[1] + 2)
                    cv2.rectangle(im, diag0, diag1, color=(0, 0, 255), thickness=-1)
                    cv2.putText(im, text, location, FONTFACE, FONTSCALE, FONTCOLOR,
                                THICKNESS, LINETYPE)


def show():
    """Display/producer loop.

    Reads frames from the capture device, runs person detection + tracking,
    feeds (frame, tracklist) pairs to the inference thread via frame_queue,
    overlays the most recent action results, and writes the annotated frames
    to the output video. Exits on ESC / q / Q.

    Relies on module globals set up in main(): vcap, det_model, detlabels,
    frame_queue, result_queue, videoWriter.

    Note: the original drew the identical annotation overlay twice per frame
    (two verbatim copies of the drawing loop); it is drawn once here, which
    produces the same pixels.
    """
    end_res = []  # latest action results; persist so labels stay visible between updates
    cv2.namedWindow('demo', cv2.WINDOW_NORMAL)
    while True:
        ret, frame = vcap.read()
        if ret == False:
            # Keep retrying (a camera may momentarily drop frames).
            print('No image exist\n')
            continue
        # Downscale to speed up detection; keep a clean copy for the action model.
        im = cv2.resize(frame, (960, 540))
        im1 = cv2.resize(frame, (960, 540))
        # im=cv2.rotate(im,cv2.ROTATE_180)
        # im1 = cv2.rotate(im1, cv2.ROTATE_180)
        detres = predict_image(det_model, 0.25, im)
        if len(detres) > 0:
            # Keep only 'person' detections: (x1, y1, x2, y2, score).
            dd = []
            for res in detres['boxes']:
                if detlabels[int(res[0])].strip('\n') == 'person':
                    dd.append((res[2], res[3], res[4], res[5], res[1]))
            tracklist = tracker.update(dd, im)
            tracker.draw_bboxes(im, tracklist, line_thickness=None)
            if len(tracklist) > 0:
                frame_queue.append((im1, tracklist))
            if len(result_queue) != 0:
                end_res = result_queue.popleft()
            if len(end_res) > 0:
                _draw_action_results(im, end_res, tracklist)
        cv2.imshow('demo', im)
        ch = cv2.waitKey(1)
        videoWriter.write(im)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            frame_queue.clear()
            vcap.release()
            videoWriter.release()
            break
def inference():
    """Consumer loop: run the spatio-temporal action model on buffered windows.

    Waits until frame_queue (filled by show()) holds a full window of
    (frame, tracklist) pairs, builds person proposals from the tracker
    output, runs act_model on the resampled clip, thresholds the action
    scores, and pushes (box, labels, scores) results onto result_queue.

    Reads module globals set up in main(): frame_queue, result_queue,
    sample_length, window_size, predict_stepsize, w_ratio, h_ratio,
    clip_len, frame_interval, device, new_w, new_h, img_norm_cfg,
    act_model, actionlabels.
    """
    while True:
        cur_windows=[]
        while len(cur_windows)==0:
            # Busy-wait until the producer has buffered a full window.
            if len(frame_queue) == sample_length:
                cur_windows = list(np.array(frame_queue))
                # Extract the image frames
                num_frame = len(cur_windows)
                # Note that it's 1 based here
                timestamps = np.arange(window_size // 2, num_frame + 1 - window_size // 2,
                                       predict_stepsize)
                # NOTE(review): center_frames is computed but never used.
                center_frames = [cur_windows[ind - 1][0] for ind in timestamps]
                human_detections = []
                dd=[cur_windows[ind - 1][1] for ind in timestamps]
                for idx in range(len(dd)):
                    tmp=dd[idx]
                    tmp1=[]
                    # Per tracked person: (x1, y1, x2, y2, track_id) as float32.
                    for tt in tmp:
                        tmp1.append([tt[0].astype('float32'),tt[1].astype('float32'),tt[2].astype('float32'),tt[3].astype('float32'),tt[5].astype('float32')])
                    human_detections.append(np.array(tmp1))
                    # NOTE(review): this break keeps only the first timestamp's
                    # detections — presumably intentional (one proposal set per
                    # window), but confirm.
                    break
                if len(human_detections) == 0:
                    print('No person exist\n')
                    # Drop older frames so the producer can refill the window.
                    while len(frame_queue) >= 25:
                        del frame_queue[0]
                    continue
                # print(human_detections)
                input_recog=[]
                aa = human_detections[0]
                if aa.shape[0] == 0:
                    continue
                # Scale boxes from display resolution to model input resolution.
                aa[:, 0:4:2] *= w_ratio
                aa[:, 1:4:2] *= h_ratio
                input_recog.append(torch.from_numpy(aa[:,:4]).to(device))

                predictions = []
                proposal = input_recog[0]
                if proposal.shape[0] == 0:
                    # NOTE(review): appears unreachable after the aa.shape[0]
                    # check above, and `return` would terminate this worker
                    # thread permanently — confirm the intent.
                    while len(frame_queue) >= 20:
                        del frame_queue[0]
                    return
                # frames = [mmcv.imresize(img, (new_w, new_h)) for img in cur_windows]
                # Sample clip_len frames centered on the first timestamp.
                start_frame = timestamps[0] - (clip_len // 2 - 1) * frame_interval
                frame_inds = start_frame + np.arange(0, window_size, frame_interval)
                frame_inds = list(frame_inds - 1)
                imgs = [mmcv.imresize(cur_windows[ind][0], (new_w, new_h)).astype(np.float32) for ind in frame_inds]
                # imnormalize_ mutates each frame in place.
                _ = [mmcv.imnormalize_(img, **img_norm_cfg) for img in imgs]
                # THWC -> CTHW -> 1CTHW
                input_array = np.stack(imgs).transpose((3, 0, 1, 2))[np.newaxis]
                input_tensor = torch.from_numpy(input_array).to(device)

                with torch.no_grad():
                    result = act_model(
                        return_loss=False,
                        img=[input_tensor],
                        img_metas=[[dict(img_shape=(new_h, new_w))]],
                        proposals=[[proposal]])
                    result = result[0]
                    prediction = []
                    # N proposals
                    for i in range(proposal.shape[0]):
                        prediction.append([])
                    # Perform action score thresholding
                    for i in range(len(result)):
                        if i + 1 not in actionlabels:
                            continue
                        for j in range(proposal.shape[0]):
                            # NOTE(review): hard-coded 0.40 threshold; the
                            # --det-score-thr CLI option is not used here.
                            if result[i][j, 4] > 0.40:
                                prediction[j].append((actionlabels[i + 1], result[i][j, 4]))
                    predictions.append(prediction)
                results = []
                for human_detection, prediction in zip(human_detections[0], predictions[0]):
                    print(human_detection)
                    print(prediction)
                    # prediction_temp = []
                    results.append(
                        (human_detection, [x[0] for x in prediction], [x[1] for x in prediction]))
                    # results.append(prediction_temp)  # results: (person bbox, action labels, action scores)
                if len(results) > 0:
                    result_queue.append(results)
                frame_queue.clear()
def load_label_map(file_path):
    """Load Label Map.

    Each non-empty line is expected to look like ``"<int id>: <label name>"``.

    Args:
        file_path (str): The file path of label map.

    Returns:
        dict: The label map (int -> label name).
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle to be reclaimed by the GC).
    with open(file_path, encoding='utf-8') as f:
        lines = [x.strip().split(': ') for x in f]
    return {int(x[0]): x[1] for x in lines}
def abbrev(name):
    """Get the abbreviation of a label name.

    'take (an object) from (a person)' -> 'take ... from ...'

    A '(' with no matching ')' after it is left as-is. (The original looped
    forever in that case: find(')') returned -1, and name[ed + 1:] re-appended
    the whole string, so the '(' was never consumed.)
    """
    while True:
        st = name.find('(')
        if st == -1:
            break
        # Look for the closing paren *after* the opening one.
        ed = name.find(')', st)
        if ed == -1:
            break  # unmatched '(' — stop instead of looping forever
        name = name[:st] + '...' + name[ed + 1:]
    return name
def main():
    """Entry point: load the detector and action models, open the video
    source, and run the show/inference worker threads until the display
    thread exits.

    Publishes its setup through module globals because show() and
    inference() run as threads reading them.
    """
    global vcap, det_model, detlabels, sample_length
    global frame_queue, result_queue
    global window_size, predict_stepsize, w_ratio, h_ratio, clip_len, frame_interval
    global device, endFlag
    global new_w, new_h, img_norm_cfg, act_model, actionlabels
    global videoWriter

    args = parse_args()
    print(args)
    endFlag = 1  # thread end flag — set but not currently read by the workers
    device = args.device

    # Load the detection label list (one class name per line).
    with open(args.det_label_file, 'r', encoding='utf-8') as f:
        detlabels = f.readlines()
    print('Load det lab map success\n')

    # Build the detection model from the exported Paddle model directory.
    det_model = Detector(args.det_model_dir, use_gpu=True, run_mode='fluid')
    print('Load det model success\n')

    # Load the action label map (int id -> label name).
    actionlabels = load_label_map(args.label_map)
    print('Load action label map success\n')

    # Load the action-detection config and image-normalization settings.
    # (The original parsed the same config file twice; once is enough.)
    config = mmcv.Config.fromfile(args.act_config)
    img_norm_cfg = config['img_norm_cfg']
    if 'to_rgb' not in img_norm_cfg and 'to_bgr' in img_norm_cfg:
        # Older configs use 'to_bgr'; normalize to the 'to_rgb' key.
        to_bgr = img_norm_cfg.pop('to_bgr')
        img_norm_cfg['to_rgb'] = to_bgr
    img_norm_cfg['mean'] = np.array(img_norm_cfg['mean'])
    img_norm_cfg['std'] = np.array(img_norm_cfg['std'])

    # Build the spatio-temporal action-detection model and load its weights.
    config.model.backbone.pretrained = None
    act_model = build_detector(config.model, test_cfg=config.test_cfg)
    load_checkpoint(act_model, args.act_checkpoint, map_location=args.device)
    act_model.to(args.device)
    act_model.eval()
    print('Load action model success\n')

    # Open the video source: a single-character argument is a camera index,
    # anything else is a file path. (The original duplicated this whole
    # stanza in both branches.)
    source = int(args.video) if len(args.video) == 1 else args.video
    vcap = cv2.VideoCapture(source)
    fps = int(vcap.get(cv2.CAP_PROP_FPS))
    # Output size matches the 960x540 frames produced by show().
    videoWriter = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc('M', 'P', 'E', 'G'),
                                  fps, (960, 540))

    # Probe one frame to derive the model input size and the scale ratios
    # used to map tracker boxes onto the resized model input.
    ret, frame = vcap.read()
    if ret == True:
        orih, oriw, _ = frame.shape
        new_w, new_h = mmcv.rescale_size((oriw, orih), (256, np.inf))
        w_ratio, h_ratio = new_w / oriw, new_h / orih
    else:
        print("Video or Camera Open failed\n")
        sys.exit(1)

    # Pull clip sampling parameters from the config's validation pipeline.
    val_pipeline = config['val_pipeline']
    sampler = [x for x in val_pipeline if x['type'] == 'SampleAVAFrames'][0]
    clip_len, frame_interval = sampler['clip_len'], sampler['frame_interval']
    window_size = clip_len * frame_interval
    assert clip_len % 2 == 0, 'We would like to have an even clip_len'
    predict_stepsize = args.predict_stepsize

    sample_length = 30
    try:
        frame_queue = deque(maxlen=sample_length)
        result_queue = deque(maxlen=30)
        pw = Thread(target=show, args=(), daemon=True)
        pr = Thread(target=inference, args=(), daemon=True)
        pw.start()
        pr.start()
        # Only the display thread is joined; inference is a daemon and dies
        # with the process.
        pw.join()
    except KeyboardInterrupt:
        pass
    print("end process")
# Script entry point.
if __name__ == '__main__':

    main()