#!/usr/bin/python3
# -*- coding: utf-8 -*-

import sys
# sys.path.append('/opt/work/caffe/python')
sys.path.insert(0, '.')

import numpy as np
import os
import struct
import cv2
from scipy.optimize import linear_sum_assignment as linear_assignment
from mmdet.ops import nms
import time
import multiprocessing

# Input locations (absolute paths; adjust per machine).
video_path = r'/rootfs/media/kasim/DataSet/Record2'
video_list_bbox_file = r'/rootfs/media/kasim/DataSet/Record2/video_bbox_count.txt'
video_list_bbox_path = r'/rootfs/media/kasim/DataSet/Record2'
out_video_path = r'/rootfs/media/kasim/Data1/data/VideoGenderCropPerson'

video_list_file = r'/rootfs/media/kasim/DataSet/Record2/one_person_face_video_list.txt'

# Longest side of a cropped person image; larger crops get downscaled.
max_image_size = 512

# Number of crop worker processes and queue back-pressure thresholds.
proccess_count = 32
wait_count_thr = proccess_count*10
wait_out_count_thr = proccess_count*10

CROP_PERSON = True  # True: crop person boxes; False: crop face boxes
SHOW = False        # interactive preview (used only when CROP_PERSON is False)

# BBoxes with IoU greater than or equal to this threshold are suppressed.
NMS_IOU_THR = 0.1

# Face boxes smaller than this (in pixels) are discarded when loading.
MIN_FACE_WIDTH = 50
MIN_FACE_HEIGHT = 50
DISPLAY_WIDTH = 1280
DISPLAY_HEIGHT = 720
# Videos with fewer recorded bboxes than this are skipped entirely.
MIN_BBOX_COUNT = 20
DISPLAY_ALL_FRAME = True
# Class names whose detections are kept/drawn.
DISPLAY_CLASS_LIST = [
    'Person',
    'Face',
]

# Per-class score thresholds, indexed by class label.
THRESHOLDS = [
    0.5,  # Person
    0.7,  # Cat
    0.7,  # Dog
    0.7,  # BabyCar
    0.6,  # Face
]

# cv2.waitKey delay (ms) during non-paused playback.
SLEEP = 1

# Class names indexed by integer label.
CLASS_NAMES = [
    'Person',
    'Cat',
    'Dog',
    'BabyCar',
    'Face',
]

# Inverse mapping: class name -> integer label.
CLASS_LABELS = {
    'Person': 0,
    'Cat': 1,
    'Dog': 2,
    'BabyCar': 3,
    'Face': 4,
}

FACE_CLASS_LABEL = CLASS_LABELS['Face']
PERSON_CLASS_LABEL = CLASS_LABELS['Person']

# BGR draw colors, indexed by class label.
COLORS = [
    (255, 0, 0),
    (0, 255, 0),
    (0, 255, 255),
    (255, 0, 0),
    (0, 0, 255),
]

# Set of integer labels selected via DISPLAY_CLASS_LIST.
DISPLAY_LABLE_SET = set()
for class_name in DISPLAY_CLASS_LIST:
    DISPLAY_LABLE_SET.add(CLASS_LABELS[class_name])

def display_video(video_path, bbox_info_dict):
    """Play a video with its detections drawn, with keyboard control.

    Keys: 'p' toggles pause, 'n' skips to the next video, 'q' quits;
    any other key holds the current frame until space (or 'q').

    Args:
        video_path (str): path of the video file to play.
        bbox_info_dict (dict): frame_id -> list of
            [label, x1, y1, x2, y2, score] entries for that frame.

    Returns:
        bool: False when the user pressed 'q' (stop everything),
        True otherwise (video finished or 'n' pressed).
    """
    cap = cv2.VideoCapture(video_path)

    is_pause = True
    frame_id = 0
    while True:
        grabbed, image = cap.read()

        if not grabbed:
            # End of stream (or unreadable file).
            break

        has_display_lable = False
        bbox_info_list = bbox_info_dict.get(frame_id, None)
        if bbox_info_list is not None:
            for bbox_info in bbox_info_list:
                lable = bbox_info[0]
                if lable not in DISPLAY_LABLE_SET:
                    continue
                has_display_lable = True
                color = COLORS[lable]
                _class_name = CLASS_NAMES[lable]
                bbox = bbox_info[1:]
                # bbox is [x1, y1, x2, y2, score]; bbox[4] is the score.
                label_text = '{} {:.2f}'.format(_class_name, bbox[4])
                cv2.putText(image, label_text, (bbox[0], bbox[1] + 32), cv2.FONT_HERSHEY_COMPLEX, 1.0, color)
                cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, thickness=2)

        frame_id += 1

        if DISPLAY_ALL_FRAME or has_display_lable:
            if (image.shape[0] != DISPLAY_HEIGHT) or (image.shape[1] != DISPLAY_WIDTH):
                image = cv2.resize(image, (DISPLAY_WIDTH, DISPLAY_HEIGHT))
            cv2.imshow('image', image)
            if is_pause:
                # Paused: block until any key is pressed.
                key = cv2.waitKey()
            else:
                key = cv2.waitKey(SLEEP)

            if -1 != key:
                if key == ord('p'):
                    is_pause = not is_pause
                elif key == ord('n'):
                    # Skip the rest of this video.
                    break
                elif key == ord('q'):
                    return False
                else:
                    # Any other key: hold this frame until space (or 'q').
                    while key != ord(' '):
                        key = cv2.waitKey()
                        if key == ord('q'):
                            return False

    return True


def iof(bb_face, bb_person):
    """Compute "Intersection over Face" for aligned pairs of bboxes.

    Row i of ``bb_face`` is paired with row i of ``bb_person``; both are
    arrays of boxes in [x1, y1, x2, y2] form.  The result for each pair is
    intersection_area / face_area.
    """
    left = np.maximum(bb_face[:, 0], bb_person[:, 0])
    top = np.maximum(bb_face[:, 1], bb_person[:, 1])
    right = np.minimum(bb_face[:, 2], bb_person[:, 2])
    bottom = np.minimum(bb_face[:, 3], bb_person[:, 3])
    # Clamp negative extents to zero for non-overlapping pairs.
    inter_area = np.maximum(0., right - left) * np.maximum(0., bottom - top)
    face_area = (bb_face[:, 2] - bb_face[:, 0]) * (bb_face[:, 3] - bb_face[:, 1])
    return inter_area / face_area


def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
    """Calculate overlap between two sets of bboxes.

    If ``is_aligned`` is ``False``, computes overlaps between every bbox of
    ``bboxes1`` and every bbox of ``bboxes2``; otherwise only between the
    aligned pairs (row i with row i).  Box widths/heights use the inclusive
    pixel convention (``x2 - x1 + 1``).

    Args:
        bboxes1: array of shape (m, 4).
        bboxes2: array of shape (n, 4); when ``is_aligned`` is ``True``,
            m and n must be equal.
        mode (str): "iou" (intersection over union) or "iof"
            (intersection over foreground, i.e. over the bboxes1 areas).

    Returns:
        array of shape (m, n) when not aligned, otherwise shape (m,).
    """
    assert mode in ['iou', 'iof']

    area1 = ((bboxes1[:, 2] - bboxes1[:, 0] + 1)
             * (bboxes1[:, 3] - bboxes1[:, 1] + 1))

    if is_aligned:
        assert bboxes1.shape[0] == bboxes2.shape[0]
        left_top = np.maximum(bboxes1[:, :2], bboxes2[:, :2])        # (m, 2)
        right_bottom = np.minimum(bboxes1[:, 2:4], bboxes2[:, 2:4])  # (m, 2)
        fg_area = area1
    else:
        # Broadcast bboxes1 against bboxes2 to get all (m, n) pairings.
        left_top = np.maximum(bboxes1[:, None, :2], bboxes2[:, :2])        # (m, n, 2)
        right_bottom = np.minimum(bboxes1[:, None, 2:4], bboxes2[:, 2:4])  # (m, n, 2)
        fg_area = area1[:, None]

    sides = np.clip(right_bottom - left_top + 1, 0, None)
    overlap = sides[..., 0] * sides[..., 1]

    if mode == 'iof':
        return overlap / fg_area

    area2 = ((bboxes2[:, 2] - bboxes2[:, 0] + 1)
             * (bboxes2[:, 3] - bboxes2[:, 1] + 1))
    return overlap / (fg_area + area2 - overlap)


def _linear_assignment(matrix):
    indices = linear_assignment(matrix)
    if isinstance(indices, (tuple, list)):
        indices = np.stack(indices, 1)
    return indices


def matching(face_boxes, person_boxes, iof_threshold=0.8):
    """Assign face boxes to person boxes by maximizing total IoF.

    Uses the Hungarian algorithm on the negated IoF matrix, then rejects
    assignments whose IoF is below ``iof_threshold``.

    Returns:
        tuple: (matches, unmatched_faces, unmatched_persons) where matches
        is an (n, 2) int array of [face_idx, person_idx] pairs and the other
        two are arrays of leftover indices.
    """
    iof_matrix = bbox_overlaps(face_boxes, person_boxes, mode='iof')
    # The solver minimizes cost; negate to maximize IoF.
    pairs = _linear_assignment(-iof_matrix)

    assigned_faces = pairs[:, 0]
    assigned_persons = pairs[:, 1]
    unmatched_faces = [f for f in range(len(face_boxes))
                       if f not in assigned_faces]
    unmatched_persons = [p for p in range(len(person_boxes))
                         if p not in assigned_persons]

    # Reject assignments whose overlap is below the threshold.
    matches = []
    for pair in pairs:
        if iof_matrix[pair[0], pair[1]] < iof_threshold:
            unmatched_faces.append(pair[0])
            unmatched_persons.append(pair[1])
        else:
            matches.append(pair.reshape(1, 2))

    if matches:
        matches = np.concatenate(matches, axis=0)
    else:
        matches = np.empty((0, 2), dtype=int)

    return matches, np.array(unmatched_faces), np.array(unmatched_persons)


def MarginalBBox(bbox, margins=(0.3, 0.4, 0.3, 0.2)):
    """Expand boxes by per-side fractional margins (vectorized).

    Args:
        bbox (ndarray): (n, 4+) boxes as [x1, y1, x2, y2, ...]; any extra
            columns (e.g. score) are passed through unchanged.
        margins (sequence): fractional margins (left, top, right, bottom),
            each relative to half the box width/height.

    Returns:
        ndarray: (n, 4+) expanded boxes.  x1/y1 are clipped at 0; the
        right/bottom edges are not clipped because the image size is not
        known here.
    """
    # Fixed defect: mutable list default replaced with an immutable tuple.
    xyc = (bbox[:, :2] + bbox[:, 2:4]) / 2
    whbox = (bbox[:, 2:4] - bbox[:, :2]) / 2

    # Per-axis scale factors for the top-left and bottom-right corners.
    m1 = np.array([[1 + margins[0], 1 + margins[1]]], dtype=np.float32)
    m2 = np.array([[1 + margins[2], 1 + margins[3]]], dtype=np.float32)

    lt = np.clip(xyc - whbox*m1, 0, None)
    rb = xyc + whbox*m2

    if bbox.shape[1] > 4:
        bbox = np.hstack([lt, rb, bbox[:, 4:]])
    else:
        bbox = np.hstack([lt, rb])

    return bbox


def CropMarginalBBox(bbox, w, h, margins=(0.3, 0.4, 0.3, 0.2)):
    """Expand a single box by per-side margins and clip it to the image.

    Args:
        bbox (sequence): [x1, y1, x2, y2].
        w (int): image width (right clipping bound).
        h (int): image height (bottom clipping bound).
        margins (sequence): fractional margins (left, top, right, bottom),
            each relative to half the box width/height.

    Returns:
        list[int]: expanded, clipped [x1, y1, x2, y2] (truncated to int).
    """
    # Fixed defect: mutable list default replaced with an immutable tuple.
    xc = (bbox[0] + bbox[2]) / 2
    yc = (bbox[1] + bbox[3]) / 2
    wbox = (bbox[2] - bbox[0]) / 2
    hbox = (bbox[3] - bbox[1]) / 2

    # Add margin to the patch along each side, based on size from the
    # center of the crop, then clip to the image bounds.
    xmin = max(0, xc - wbox * (1 + margins[0]))
    ymin = max(0, yc - hbox * (1 + margins[1]))
    xmax = min(w, xc + wbox * (1 + margins[2]))
    ymax = min(h, yc + hbox * (1 + margins[3]))

    return [int(xmin), int(ymin), int(xmax), int(ymax)]


def image_encode(image_path, image, quality=None):
    """Write ``image`` to ``image_path``; ``quality`` sets JPEG quality when given."""
    if quality is not None:
        cv2.imwrite(image_path, image, params=(cv2.IMWRITE_JPEG_QUALITY, quality))
    else:
        cv2.imwrite(image_path, image)


def crop_face_run(file_queue, out_queue, id):
    """Worker: crop margin-expanded face patches from videos to JPEG files.

    Consumes (video_name, bbox_info_dict) items from ``file_queue`` until a
    ``None`` sentinel arrives.  For every Face bbox it writes a crop under
    ``out_video_path/<video stem>/`` and puts
    ((rel_image_path, bbox_in_crop, bbox_in_frame), False, worker_id)
    onto ``out_queue``; a final (None, True, worker_id) signals completion.
    bbox_info_dict maps frame_id -> list of [label, x1, y1, x2, y2, score].
    """
    # output_dir = args.output_dir
    try:
        while True:
            file_info = file_queue.get(block=True)
            if file_info is None:
                # Sentinel from the producer: no more videos for this worker.
                break
            video_name, bbox_info_dict = file_info
            video_file_path = os.path.join(video_path, video_name)

            try:
                cap = cv2.VideoCapture(video_file_path)
                video_dir = os.path.splitext(video_name)[0]
                out_video_dir = os.path.join(out_video_path, video_dir)
                if not os.path.exists(out_video_dir):
                    os.makedirs(out_video_dir)
                frame_id = 0
                while True:
                    grabbed, image = cap.read()

                    if not grabbed:
                        # End of this video.
                        break

                    bbox_info_list = bbox_info_dict.get(frame_id, None)
                    if bbox_info_list is not None:
                        w, h = image.shape[1], image.shape[0]
                        for i, bbox_info in enumerate(bbox_info_list):
                            lable = bbox_info[0]
                            if lable != FACE_CLASS_LABEL:
                                continue
                            bbox = bbox_info[1:5]
                            # Expand the face box by margins, clipped to the frame.
                            crop_bbox = CropMarginalBBox(bbox, w, h)
                            bbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
                            x, y = crop_bbox[0], crop_bbox[1]
                            # Face box re-expressed in crop-local coordinates.
                            new_bbox = [(bbox[0]-x), (bbox[1]-y), (bbox[2]-x), (bbox[3]-y)]
                            crop_image = image[crop_bbox[1]:crop_bbox[3], crop_bbox[0]:crop_bbox[2]]

                            image_name = '{:05d}_{:03d}.jpg'.format(frame_id, i)
                            image_path = os.path.join(out_video_dir, image_name)
                            image_encode(image_path, crop_image, quality=98)

                            # Back-pressure: wait while the writer queue is too full.
                            while out_queue.qsize() > wait_out_count_thr:
                                time.sleep(0.01)
                            image_path = os.path.join(video_dir, image_name)
                            out_queue.put(((image_path, new_bbox, bbox), False, id))
                    frame_id += 1
            except Exception as e:
                # Keep the worker alive: skip this video, continue with the next.
                print(e)
    except Exception as e:
        if str(e) != '':
            print(e)

    # Let the output queue drain before announcing completion.
    while out_queue.qsize() > 0:
        time.sleep(0.01)
    out_queue.put((None, True, id))
    time.sleep(0.5)


def crop_person_run(file_queue, out_queue, id):
    """Worker: crop person patches from videos to JPEG files.

    Same protocol as ``crop_face_run`` but for Person boxes, without margin
    expansion; crops larger than ``max_image_size`` on their longest side
    are downscaled.  Puts
    ((rel_image_path, bbox_in_crop, bbox_in_frame), False, worker_id)
    onto ``out_queue``; a final (None, True, worker_id) signals completion.
    bbox_info_dict maps frame_id -> list of [label, x1, y1, x2, y2, score].
    """
    # output_dir = args.output_dir
    try:
        while True:
            file_info = file_queue.get(block=True)
            if file_info is None:
                # Sentinel from the producer: no more videos for this worker.
                break
            video_name, bbox_info_dict = file_info
            video_file_path = os.path.join(video_path, video_name)

            try:
                cap = cv2.VideoCapture(video_file_path)
                video_dir = os.path.splitext(video_name)[0]
                out_video_dir = os.path.join(out_video_path, video_dir)
                if not os.path.exists(out_video_dir):
                    os.makedirs(out_video_dir)
                frame_id = 0
                while True:
                    grabbed, image = cap.read()

                    if not grabbed:
                        # End of this video.
                        break

                    bbox_info_list = bbox_info_dict.get(frame_id, None)
                    if bbox_info_list is not None:
                        w, h = image.shape[1], image.shape[0]
                        for i, bbox_info in enumerate(bbox_info_list):
                            lable = bbox_info[0]
                            if lable != PERSON_CLASS_LABEL:
                                continue
                            bbox = bbox_info[1:5]
                            bbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
                            # Crop equals the person box, so the crop-local box
                            # starts at the origin.
                            new_bbox = [0, 0, (bbox[2]-bbox[0]), (bbox[3]-bbox[1])]
                            crop_image = image[bbox[1]:bbox[3], bbox[0]:bbox[2]]
                            if max_image_size is not None:
                                # NOTE: w, h are rebound here to the crop size.
                                w, h = crop_image.shape[1], crop_image.shape[0]
                                image_size = max(w, h)
                                if image_size > max_image_size:
                                    # Downscale so the longest side fits max_image_size.
                                    scale = max_image_size / image_size
                                    w = int(round(w * scale))
                                    h = int(round(h * scale))
                                    crop_image = cv2.resize(crop_image, (w, h))

                            image_name = '{:05d}_{:03d}.jpg'.format(frame_id, i)
                            image_path = os.path.join(out_video_dir, image_name)
                            image_encode(image_path, crop_image, quality=95)

                            # Back-pressure: wait while the writer queue is too full.
                            while out_queue.qsize() > wait_out_count_thr:
                                time.sleep(0.01)
                            image_path = os.path.join(video_dir, image_name)
                            out_queue.put(((image_path, new_bbox, bbox), False, id))
                    frame_id += 1
            except Exception as e:
                # Keep the worker alive: skip this video, continue with the next.
                print(e)
    except Exception as e:
        if str(e) != '':
            print(e)

    # Let the output queue drain before announcing completion.
    while out_queue.qsize() > 0:
        time.sleep(0.01)
    out_queue.put((None, True, id))
    time.sleep(0.5)


def out_run(out_queue, total_file_count):
    """Writer process: append crop records from ``out_queue`` to a list file.

    Each record line is: "<rel_path> x1,y1,x2,y2 x1,y1,x2,y2" (bbox in crop
    coordinates, then in original frame coordinates).  Stops after all
    ``proccess_count`` workers have sent their (None, True, id) finish
    marker, or on a ``None`` item.
    NOTE: ``total_file_count`` is currently unused.
    """
    file_count = 0
    out_file_dir = out_video_path
    if not os.path.exists(out_file_dir):
        os.makedirs(out_file_dir)
        os.system('chmod a+wr {}'.format(out_file_dir))
    out_file_path = os.path.join(out_video_path, 'video_crop_face_list.txt')
    out_file = open(out_file_path, 'w')
    try:
        finish_worker_count = 0
        while True:
            file_info = out_queue.get(block=True)
            if file_info is None:
                break
            image_info, finish, id = file_info
            if finish:
                # A crop worker finished; exit once all of them have.
                print('Proc{} finish'.format(id, ))
                finish_worker_count += 1
                if proccess_count <= finish_worker_count:
                    break
                continue
            file_count += 1
            if image_info is None:
                continue
            image_path, new_bbox, bbox = image_info
            out_info = '{} {},{},{},{} {},{},{},{}\n'.format(image_path, *new_bbox, *bbox)
            out_file.write(out_info)
            if file_count % 1000 == 0:
                # Periodic progress report and flush.
                print('{:06f}, Proc{}, Count: {}, {}'.format(time.time(), id, file_count, image_path))
                out_file.flush()
    except Exception as e:
        print(e)
    print('{:06f}, Proc{}, Count: {}'.format(time.time(), id, file_count))
    out_file.close()
    os.system('chmod a+wr {}'.format(out_file_path))


def main():
    """Load per-video bbox records, filter them, and fan work out to workers.

    Reads the video list and per-video bbox counts, then for each video
    parses its binary ``.dat`` bbox file, keeps frames that contain both a
    face and a person, and feeds crop jobs to the worker processes (or
    previews them when SHOW is enabled in face mode).
    """
    # video name -> number of bbox records in its .dat file.
    video_bbox_count_dict = {}
    with open(video_list_bbox_file, 'r') as file:
        for video_info in file.readlines():
            video_info = video_info.split(',')
            video_bbox_count_dict[video_info[0].strip()] = int(video_info[1].strip())

    video_list = []
    with open(video_list_file, 'r') as file:
        for line in file.readlines():
            video_name = line.split()[0].strip()
            video_list.append(video_name)

    total_file_count = len(video_list)

    # Effective thresholds: classes outside DISPLAY_LABLE_SET get 1.1 so
    # no detection (score <= 1.0) can pass for them.
    _THRESHOLDS = []
    for label, thr in enumerate(THRESHOLDS):
        if label in DISPLAY_LABLE_SET:
            _THRESHOLDS.append(thr)
        else:
            _THRESHOLDS.append(1.1)

    file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()

    # Spawn the crop workers (person or face mode) plus one writer process.
    workers = []
    for i in range(proccess_count):
        if CROP_PERSON:
            workers.append(multiprocessing.Process(target=crop_person_run, args=(file_queue, out_queue, i)))
        else:
            workers.append(multiprocessing.Process(target=crop_face_run, args=(file_queue, out_queue, i)))

    for i in range(proccess_count):
        workers[i].start()

    out_worker = multiprocessing.Process(target=out_run, args=(out_queue, total_file_count))
    out_worker.start()

    file_count = 0
    for video_name in video_list:
        file_count += 1
        # print('{}/{} {}'.format(file_count, total_file_count, video_name))
        bbox_count = video_bbox_count_dict.get(video_name, 0)
        if bbox_count < MIN_BBOX_COUNT:
            continue

        # Parse the binary bbox file: 28-byte records of
        # (frame_id, label, x1, y1, x2, y2) as int32 plus score as float32.
        bbox_info_dict = {}
        with open(os.path.join(video_list_bbox_path, os.path.splitext(video_name)[0]+'.dat'), 'rb') as file:
            for j in range(bbox_count):
                bbox_info = file.read(28)
                if len(bbox_info) < 28:
                    # Truncated file: stop reading.
                    break
                bbox_info = struct.unpack('6i1f', bbox_info)
                frame_id = bbox_info[0]
                label = bbox_info[1]
                score = bbox_info[6]
                if score < _THRESHOLDS[label]:
                    continue

                # bbox keeps (x1, y1, x2, y2, score).
                bbox = bbox_info[2:]
                if label == FACE_CLASS_LABEL:
                    # Drop faces that are too small to be useful.
                    w = bbox[2] - bbox[0]
                    h = bbox[3] - bbox[1]
                    if w < MIN_FACE_WIDTH or h < MIN_FACE_HEIGHT:
                        continue
                # Group as frame_id -> {label: [bbox, ...]}.
                bbox_infos = bbox_info_dict.get(frame_id, None)
                if bbox_infos is None:
                    bbox_info_dict[frame_id] = {label: [bbox]}
                else:
                    _bbox_info = bbox_infos.get(label, None)
                    if _bbox_info is None:
                        bbox_infos[label] = [bbox]
                    else:
                        _bbox_info.append(bbox)

        # Keep only videos that have at least one frame containing both a
        # face and a person detection.
        skip_video = True
        for frame_id, bbox_infos in bbox_info_dict.items():
            face_bboxes = bbox_infos.get(FACE_CLASS_LABEL, None)
            person_bboxes = bbox_infos.get(PERSON_CLASS_LABEL, None)
            if (face_bboxes is not None) and (person_bboxes is not None):
                skip_video = False
                break

        if skip_video:
            continue

        if len(bbox_info_dict) < 1:
            continue

        # Flatten into frame_id -> [[label, x1, y1, x2, y2, score], ...]
        # for the class being cropped.
        _bbox_info_dict = {}
        for frame_id, bbox_infos in bbox_info_dict.items():
            face_bboxes = bbox_infos.get(FACE_CLASS_LABEL, None)
            person_bboxes = bbox_infos.get(PERSON_CLASS_LABEL, None)
            if CROP_PERSON:
                if person_bboxes is not None:
                    if len(person_bboxes) > 0:
                        _bbox_info_dict[frame_id] = [[PERSON_CLASS_LABEL, *bbox] for bbox in person_bboxes]
            else:
                if face_bboxes is not None:
                    if len(face_bboxes) > 0:
                        _bbox_info_dict[frame_id] = [[FACE_CLASS_LABEL, *bbox] for bbox in face_bboxes]

        if CROP_PERSON:
            if len(_bbox_info_dict) > 0:
                # Back-pressure: don't flood the work queue.
                while file_queue.qsize() > wait_count_thr:
                    time.sleep(0.01)
                # Pre-read the file to warm the OS cache and speed up the
                # workers' disk reads (the buffer itself is discarded).
                video_file_path = os.path.join(video_path, video_name)
                with open(video_file_path, 'rb') as _file:
                    _file_buffer = _file.read()
                file_queue.put((video_name, _bbox_info_dict))
        else:
            if SHOW:
                # Interactive preview; display_video returns False on 'q'.
                if not display_video(os.path.join(video_path, video_name), bbox_info_dict):
                    break

    # One sentinel per worker so every worker loop terminates.
    for i in range(proccess_count):
        file_queue.put(None)

    out_worker.join()

    for i in range(proccess_count):
        workers[i].join()
    print('Finish!')


# Script entry point.
if __name__ == '__main__':
    main()
