#!/usr/bin/python3
# -*- coding: utf-8 -*-

import sys
# sys.path.append('/opt/work/caffe/python')
sys.path.insert(0, '.')

import numpy as np
import os
import struct
import cv2
from scipy.optimize import linear_sum_assignment as linear_assignment
from mmdet.ops import nms
import time
import multiprocessing


# Person crops whose longer side exceeds this (pixels) are downscaled on save.
max_image_size = 512

proccess_count = 32                       # number of crop worker processes
wait_count_thr = proccess_count*10        # input-queue back-pressure limit
wait_out_count_thr = proccess_count*20    # output-queue back-pressure limit

CROP = True   # batch crop-and-save mode
SHOW = False  # interactive preview mode (only consulted when CROP is False)

# BBoxes with IoU greater than or equal to this threshold are suppressed.
NMS_IOU_THR = 0.1

MIN_FACE_WIDTH = 50
MIN_FACE_HEIGHT = 50
DISPLAY_WIDTH = 1280
DISPLAY_HEIGHT = 720
MIN_BBOX_COUNT = 20
DISPLAY_ALL_FRAME = True

# Classes that display_video() overlays on the preview.
DISPLAY_CLASS_LIST = [
    'Person',
    'Face',
]

# Per-class score thresholds, indexed by class label.
THRESHOLDS = [
    0.5,  # Person
    0.7,  # Cat
    0.7,  # Dog
    0.7,  # BabyCar
    0.6,  # Face
]

SLEEP = 1  # cv2.waitKey delay (ms) while playback is not paused

CLASS_NAMES = [
    'Person',
    'Cat',
    'Dog',
    'BabyCar',
    'Face',
]

# Label ids follow the order of CLASS_NAMES.
CLASS_LABELS = {name: index for index, name in enumerate(CLASS_NAMES)}

FACE_CLASS_LABEL = CLASS_LABELS['Face']
PERSON_CLASS_LABEL = CLASS_LABELS['Person']

# BGR drawing colors, indexed by class label.
COLORS = [
    (255, 0, 0),
    (0, 255, 0),
    (0, 255, 255),
    (255, 0, 0),
    (0, 0, 255),
]

DISPLAY_LABLE_SET = {CLASS_LABELS[name] for name in DISPLAY_CLASS_LIST}


# Previous batch (kept for easy switching):
# video_path = r'/rootfs/media/yery/Dataset/Record'
# video_list_bbox_count_files = {
#     PERSON_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropPerson/video_crop_face_list.1.txt',
#     FACE_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropFace/video_crop_face_list.1.txt',
# }
# out_video_paths = {
#     PERSON_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropPerson1',
#     FACE_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropFace1',
# }

video_path = r'/rootfs/media/yery/Dataset/RecordEx'
video_list_bbox_count_files = {
    PERSON_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropPerson/video_crop_face_list.2.txt',
    FACE_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropFace/video_crop_face_list.2.txt',
}
out_video_paths = {
    PERSON_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropPerson2',
    FACE_CLASS_LABEL: r'/rootfs/media/yery/Kaso/data/VideoGenderCropFace2',
}


def display_video(video_path, bbox_info_dict):
    """Play a video and overlay the recorded bounding boxes frame by frame.

    Args:
        video_path: path of the video file to play (note: shadows the
            module-level global of the same name; kept for compatibility).
        bbox_info_dict: {frame_id: [[label, x1, y1, x2, y2, ...], ...]};
            only labels in DISPLAY_LABLE_SET are drawn.

    Returns:
        False when the user pressed 'q' (caller should stop entirely),
        True otherwise (video finished or 'n' was pressed).

    Keys: 'p' toggles pause, 'n' skips to the next video, 'q' quits,
    any other key holds the current frame until space (or 'q').
    """
    cap = cv2.VideoCapture(video_path)
    try:
        is_pause = True
        frame_id = 0
        while True:
            grabbed, image = cap.read()

            if not grabbed:
                break

            has_display_label = False
            bbox_info_list = bbox_info_dict.get(frame_id, None)
            if bbox_info_list is not None:
                for bbox_info in bbox_info_list:
                    label = bbox_info[0]
                    if label not in DISPLAY_LABLE_SET:
                        continue
                    has_display_label = True
                    color = COLORS[label]
                    class_name = CLASS_NAMES[label]
                    bbox = bbox_info[1:]
                    label_text = '{}'.format(class_name)
                    cv2.putText(image, label_text, (bbox[0], bbox[1] + 32), cv2.FONT_HERSHEY_COMPLEX, 1.0, color)
                    cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, thickness=2)

            frame_id += 1

            if DISPLAY_ALL_FRAME or has_display_label:
                if (image.shape[0] != DISPLAY_HEIGHT) or (image.shape[1] != DISPLAY_WIDTH):
                    image = cv2.resize(image, (DISPLAY_WIDTH, DISPLAY_HEIGHT))
                cv2.imshow('image', image)
                if is_pause:
                    key = cv2.waitKey()
                else:
                    key = cv2.waitKey(SLEEP)

                if -1 != key:
                    if key == ord('p'):
                        is_pause = not is_pause
                    elif key == ord('n'):
                        break
                    elif key == ord('q'):
                        return False
                    else:
                        # Any other key: hold this frame until space resumes
                        # playback or 'q' aborts.
                        while key != ord(' '):
                            key = cv2.waitKey()
                            if key == ord('q'):
                                return False

        return True
    finally:
        # Bug fix: the capture handle was never released, including on the
        # early `return False` paths.
        cap.release()


def image_encode(image_path, image, quality=None):
    """Write *image* to *image_path*, applying the JPEG quality when given."""
    if quality is None:
        cv2.imwrite(image_path, image)
        return
    cv2.imwrite(image_path, image, params=(cv2.IMWRITE_JPEG_QUALITY, quality))

def crop_run(file_queue, out_queue, id):
    """Worker process: crop Face/Person bboxes out of videos and save JPEGs.

    Pulls (video_name, bbox_info_dict) items from file_queue until a None
    sentinel arrives. For each video, decodes the frames, crops every
    Face/Person bbox and writes it under out_video_paths[label]/<video_dir>/.
    Reports each finished video as ((video_dir,), False, id) on out_queue and
    finally its own termination as (None, True, id).

    Args:
        file_queue: multiprocessing.Queue of work items (None = stop).
        out_queue: multiprocessing.Queue consumed by out_run().
        id: integer worker id, used only for progress reporting.
    """
    try:
        while True:
            file_info = file_queue.get(block=True)
            if file_info is None:
                break  # sentinel: no more work
            video_name, bbox_info_dict = file_info
            video_file_path = os.path.join(video_path, video_name)

            try:
                cap = cv2.VideoCapture(video_file_path)
                try:
                    video_dir = os.path.splitext(video_name)[0]
                    # Ensure each label's output directory exists.
                    out_video_dirs = {}
                    for label, out_video_path in out_video_paths.items():
                        out_video_dir = os.path.join(out_video_path, video_dir)
                        out_video_dirs[label] = out_video_dir
                        if not os.path.exists(out_video_dir):
                            os.makedirs(out_video_dir)
                    frame_id = 0
                    while True:
                        grabbed, image = cap.read()

                        if not grabbed:
                            break

                        bbox_info_list = bbox_info_dict.get(frame_id, None)
                        if bbox_info_list is not None:
                            for i, bbox_info in enumerate(bbox_info_list):
                                label = bbox_info[0]
                                bbox = bbox_info[1:5]
                                image_name = bbox_info[5]  # original crop file name
                                bbox = [int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])]
                                crop_image = image[bbox[1]:bbox[3], bbox[0]:bbox[2]]
                                if label == FACE_CLASS_LABEL:
                                    quality = 98
                                elif label == PERSON_CLASS_LABEL:
                                    # Downscale oversized person crops so the
                                    # longer side is at most max_image_size.
                                    if max_image_size is not None:
                                        w, h = crop_image.shape[1], crop_image.shape[0]
                                        image_size = max(w, h)
                                        if image_size > max_image_size:
                                            scale = max_image_size / image_size
                                            w = int(round(w * scale))
                                            h = int(round(h * scale))
                                            crop_image = cv2.resize(crop_image, (w, h))
                                    quality = 95
                                else:
                                    continue  # other classes are not exported
                                image_path = os.path.join(out_video_dirs[label], image_name)
                                image_encode(image_path, crop_image, quality=quality)
                        frame_id += 1
                finally:
                    # Bug fix: release the capture for every video instead of
                    # leaking one handle per processed file.
                    cap.release()
                # Throttle if the writer process is falling behind.
                while out_queue.qsize() > wait_out_count_thr:
                    time.sleep(0.01)
                out_queue.put(((video_dir, ), False, id))
            except Exception as e:
                print(e)
    except Exception as e:
        if str(e) != '':
            print(e)

    # Let the writer drain, then announce this worker's termination.
    while out_queue.qsize() > 0:
        time.sleep(0.01)
    out_queue.put((None, True, id))
    time.sleep(0.5)


def out_run(out_queue, total_file_count):
    """Writer process: append finished video dirs to video_crop_list.txt.

    Consumes (video_info, finish, worker_id) tuples from out_queue until all
    proccess_count workers have reported finish=True (or a None sentinel
    arrives), logging progress along the way.

    Args:
        out_queue: multiprocessing.Queue fed by crop_run() workers.
        total_file_count: total number of videos, for progress display.
    """
    file_count = 0
    # Bug fix: `id` was only bound inside the loop, so the final progress line
    # could format the `id` builtin if no message was ever unpacked.
    id = -1
    out_file_path = os.path.join(video_path, 'video_crop_list.txt')
    # `with` guarantees the list file is closed on any exit path.
    with open(out_file_path, 'a') as out_file:
        try:
            finish_worker_count = 0
            while True:
                file_info = out_queue.get(block=True)
                if file_info is None:
                    break
                video_info, finish, id = file_info
                if finish:
                    print('Proc{} finish'.format(id, ))
                    finish_worker_count += 1
                    if proccess_count <= finish_worker_count:
                        break  # every worker has reported in
                    continue
                file_count += 1
                if video_info is None:
                    continue
                video_dir = video_info[0]
                out_info = '{}\n'.format(video_dir)
                out_file.write(out_info)
                print('{:06f}, Proc{}, Count: {}/{}, {}'.format(time.time(), id, file_count, total_file_count, video_dir))
                if file_count % 20 == 0:
                    # Periodic flush so progress survives a crash.
                    out_file.flush()
        except Exception as e:
            print(e)
        print('{:06f}, Proc{}, Count: {}/{}'.format(time.time(), id, file_count, total_file_count))
    os.system('chmod a+wr {}'.format(out_file_path))


def main():
    """Load the per-label bbox list files and fan videos out to crop workers."""
    # Videos recorded in video_crop_list.txt from an earlier run are skipped.
    video_crop_list_file_path = os.path.join(video_path, 'video_crop_list.txt')
    video_set = set()
    if os.path.exists(video_crop_list_file_path):
        with open(video_crop_list_file_path, 'r') as file:
            for line in file:
                line = line.strip()
                if not line:
                    continue  # robustness: a blank line would raise IndexError
                video_set.add(line.split()[0])

    # Build video_name -> {frame_id: [bbox_info, ...]}.
    # bbox_info = [label, x1, y1, x2, y2, file_name].
    video_bbox_infos = {}
    for label, video_list_bbox_count_file in video_list_bbox_count_files.items():
        with open(video_list_bbox_count_file, 'r') as file:
            # gd/zs/nqjd/dxjyjy/150100414a54443452064d2742c93600/video/20190817_052156/00002_000.jpg 11,12,83,72 295,374,367,434
            for line in file:
                lines = line.strip().split()
                if not lines:
                    continue  # robustness: skip blank lines
                file_path = lines[0]
                video_name = os.path.dirname(file_path)
                if video_name in video_set:
                    continue  # already processed
                video_name = video_name + '.avi'
                file_name = os.path.basename(file_path)
                # The third column is the crop bbox (x1,y1,x2,y2).
                bbox = lines[2]
                bbox = list(map(int, bbox.split(',')))
                frame_id = int(file_name.split('_')[0])
                bbox_info = [label, *bbox, file_name]
                video_bbox_infos.setdefault(video_name, {}).setdefault(frame_id, []).append(bbox_info)

    total_file_count = len(video_bbox_infos)

    file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()

    workers = [
        multiprocessing.Process(target=crop_run, args=(file_queue, out_queue, i))
        for i in range(proccess_count)
    ]
    for worker in workers:
        worker.start()

    out_worker = multiprocessing.Process(target=out_run, args=(out_queue, total_file_count))
    out_worker.start()

    for video_name in sorted(video_bbox_infos):
        bbox_info_dict = video_bbox_infos[video_name]
        if CROP:
            if len(bbox_info_dict) > 0:
                while file_queue.qsize() > wait_count_thr:
                    time.sleep(0.01)  # throttle: keep the input queue bounded
                # Pre-read the file to warm the OS disk cache for the workers.
                video_file_path = os.path.join(video_path, video_name)
                with open(video_file_path, 'rb') as _file:
                    _file.read()
                file_queue.put((video_name, bbox_info_dict))
        elif SHOW:
            if not display_video(os.path.join(video_path, video_name), bbox_info_dict):
                break

    # One stop sentinel per worker.
    for _ in range(proccess_count):
        file_queue.put(None)

    out_worker.join()

    for worker in workers:
        worker.join()
    print('Finish!')


# Script entry point.
if __name__ == '__main__':
    main()
