#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import os

# Default configuration; every value below can be overridden on the command
# line (parse_args). The commented-out pairs are alternative dataset roots.
# base_path = r'/rootfs/media/kasim/Data1/data/low_select_Person'
# out_path = r'/rootfs/media/kasim/Data1/data/low_select_Person'
base_path = r'/rootfs/media/kasim/DataSet/high_select_Person'  # root directory containing the videos
out_path = r'/rootfs/media/kasim/DataSet/high_select_Person'   # directory for the summary output file
# base_path = r'/rootfs/data/ErisedData/Record'
# out_path = r'/rootfs/data/ErisedData/Record'
video_list_file = r'file_list.txt'  # video names (one per line) relative to base_path
gpu_count = 2  # GPUs to round-robin workers across (worker id % gpu_count)


def parse_args():
    """Parse the command-line options for the video detector.

    Defaults fall back to the module-level configuration constants defined
    above. Returns the populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Video Detect')
    option_specs = (
        ('--base_path', str, base_path, 'video base path'),
        ('--list_file', str, video_list_file, 'video list file'),
        ('--out_path', str, out_path, 'out path'),
        ('--proccess_count', int, 4, 'proccess count'),
        ('--gpu_count', int, gpu_count, 'gpu count'),
    )
    for flag, value_type, default_value, help_text in option_specs:
        parser.add_argument(flag, type=value_type, default=default_value, help=help_text)
    return parser.parse_args()


# Parse the command line once at import time and overwrite the module-level
# defaults, so all later references (including the worker processes) see the
# effective configuration.
args = parse_args()

base_path = args.base_path
video_list_file = args.list_file
out_path = args.out_path
gpu_count = args.gpu_count

import sys
# sys.path.append('/opt/work/caffe/python')
sys.path.insert(0, '.')

import cv2
import numpy as np
import struct
import multiprocessing


# Model checkpoint loaded by every worker process.
CHECKPOINT_FILE = 'outputs/model_dump/dump-30.pth'

############################################################################

SHOW = False        # True: display annotated frames interactively (single-frame mode)
WRITE_FILE = True   # True: write per-box binary records and the summary file

SLEEP_TIME = 0      # cv2.waitKey delay in ms for SHOW mode; 0 blocks until a key press
DEVICE = 'cuda'
BATCH_SIZE = 1      # frames accumulated per inference call in non-SHOW mode

# SHOW = True
# THRESHOLDS = [
#     0.5,  # Person
#     0.7,  # Cat
#     0.7,  # Dog
#     0.5,  # BabyCar
#     0.5,  # Face
# ]

# Per-class score thresholds. Values above 1.0 presumably disable the class
# (detection scores should not exceed 1.0 -- TODO confirm); detect_proc only
# ever uses index 0 (Person).
THRESHOLDS = [
    0.30,  # Person
    1.05,  # Cat
    1.05,  # Dog
    1.05,  # BabyCar
    1.05,  # Face
]

# Per-class drawing colors (OpenCV channel order, presumably BGR).
COLORS = [
    (255, 0, 0),
    (0, 255, 0),
    (0, 255, 255),
    (255, 0, 0),
    (0, 0, 255),
]

# Selects the set-NMS post-processing path in inference(); False would use the
# det_tools_cuda NMS extension instead.
if_set_nms = True


def get_box(img, result, score_thr=0.3, show=False, color=(0, 255, 0)):
    """Filter, round and clip detections for one image.

    Keeps rows of ``result`` whose column 4 is >= ``score_thr``, rounds every
    coordinate column, clips x/y to the image bounds, and returns a list of
    boxes where the last element of each row is the original float score.
    Optionally draws the boxes and labels onto ``img`` when ``show`` is True.

    Returns None when no detection survives the threshold.
    """
    kept = result[result[..., 4] >= score_thr]
    if kept.size < 1:
        return None
    # Capture the raw scores before the in-place rounding of the coord columns.
    confidences = kept[..., -1].tolist()
    kept[..., :-1] = np.round(kept[..., :-1])
    boxes = kept.astype(dtype=np.int32)
    img_h, img_w = img.shape[0], img.shape[1]
    # Clamp x coordinates to [0, w-1] and y coordinates to [0, h-1].
    for column, limit in ((0, img_w), (2, img_w), (1, img_h), (3, img_h)):
        np.clip(boxes[..., column], 0, limit - 1, out=boxes[..., column])

    box_list = boxes.tolist()
    for idx, confidence in enumerate(confidences):
        box_list[idx][-1] = confidence
    if show:
        for b in box_list:
            cv2.putText(img, '{:.3f}'.format(b[4]), (b[0], b[1]), cv2.FONT_HERSHEY_COMPLEX, 0.7, color)
            cv2.putText(img, '{}'.format(b[2]-b[0]), (b[0], int(b[1]+32)), cv2.FONT_HERSHEY_COMPLEX, 0.7, color)
            cv2.putText(img, '{}'.format(b[3]-b[1]), (b[0], int(b[1]+64)), cv2.FONT_HERSHEY_COMPLEX, 0.7, color)
            cv2.rectangle(img, (b[0], b[1]), (b[2], b[3]), color, thickness=1)
    return box_list


def detect_proc(file_queue, out_queue, id):
    """Worker process body: pull video file names from ``file_queue``, run the
    detector on every frame, optionally write a binary ``.cd.dat`` sidecar
    next to each video, and push ``(file_name, bbox_count, id)`` onto
    ``out_queue`` per finished video.

    torch and the project modules are imported inside the worker so that
    CUDA_VISIBLE_DEVICES is set before any CUDA initialization happens.

    Args:
        file_queue: multiprocessing.Queue of video names relative to
            ``base_path``; a ``None`` entry is treated as a shutdown sentinel.
        out_queue: multiprocessing.Queue receiving per-video result tuples.
        id: worker index; also selects the GPU as ``id % gpu_count``.
    """
    # Pin this worker to one GPU before torch is imported.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(id % gpu_count)

    import torch
    import torch.onnx

    import network
    import dataset
    import misc_utils
    from config import config
    from visual_utils import draw_boxes

    def CanUseGPU():
        # True when CUDA is available and device 0 has more than 7 GiB of
        # total memory.
        is_available = torch.cuda.is_available()
        device_properties = torch.cuda.get_device_properties(device=0)
        total_memory = device_properties.total_memory
        can_use_gpu = False
        if is_available and (total_memory > 7 * 1024 * 1024 * 1024):
            can_use_gpu = True
        return can_use_gpu

    def get_data(image, device):
        # Resize (unless config.eval_resize is off), convert HWC -> NCHW
        # float32, and build the im_info tensor the network expects:
        # [resized_h, resized_w, scale, original_h, original_w].
        if config.eval_resize == False:
            resized_img, scale = image, 1
        else:
            resized_img, scale = dataset.resize_img_by_short_and_max_size(
                image, config.eval_image_short_size, config.eval_image_max_size)

        original_height, original_width = image.shape[0:2]
        height, width = resized_img.shape[0:2]
        transposed_img = np.ascontiguousarray(
            resized_img.transpose(2, 0, 1)[None, :, :, :],
            dtype=np.float32)
        im_info = np.array([height, width, scale, original_height, original_width],
                           dtype=np.float32)
        image = torch.Tensor(transposed_img).cuda(device)
        im_info = torch.Tensor(im_info[None, :]).cuda(device)
        return image, im_info

    def inference(model, images, device):
        # Run the model on each image; post-process with either set-NMS
        # (if_set_nms) or the det_tools_cuda NMS extension. Both paths drop
        # detections scoring <= 0.05.
        results = []
        for image in images:
            image, im_info = get_data(image, device)
            pred_boxes = model(image, im_info)
            if if_set_nms:
                from set_nms_utils import set_cpu_nms
                # Tag prediction pairs with a shared identity column so
                # set_cpu_nms can suppress within each set; after hstack the
                # score sits at column -2 and the ident at column -1.
                n = pred_boxes.shape[0] // 2
                idents = np.tile(np.arange(n)[:, None], (1, 2)).reshape(-1, 1)
                pred_boxes = np.hstack((pred_boxes, idents))
                keep = pred_boxes[:, -2] > 0.05
                pred_boxes = pred_boxes[keep]
                keep = set_cpu_nms(pred_boxes, 0.5)
                pred_boxes = pred_boxes[keep]
            else:
                import det_tools_cuda as dtc
                nms = dtc.nms
                keep = nms(pred_boxes[:, :4], pred_boxes[:, 4], 0.5)
                pred_boxes = pred_boxes[keep]
                pred_boxes = np.array(pred_boxes)
                keep = pred_boxes[:, -1] > 0.05
                pred_boxes = pred_boxes[keep]
            # pred_boxes = pred_boxes.tolist()
            results.append(pred_boxes)
        return results

    # NOTE(review): use_gpu is computed but never read below -- dead flag?
    use_gpu = CanUseGPU()

    count = 0  # videos fully processed by this worker
    try:
        # construct the model and load checkpoint
        model_file = CHECKPOINT_FILE
        assert os.path.exists(model_file)
        # get devices
        # str_devices = args.devices
        # devices = misc_utils.device_parser(str_devices)

        device = 0
        torch.set_default_tensor_type('torch.FloatTensor')
        model = network.Network()
        check_point = torch.load(model_file)
        model.load_state_dict(check_point['state_dict'])
        model.cuda(device)
        model.eval()

        np.set_printoptions(precision=2, suppress=True)

        win_name = 'process{}'.format(id)  # preview window title (SHOW mode only)

        is_break = False
        while True:
            # A 5s timeout here (queue.Empty) is the usual termination path
            # when no None sentinel arrives; it lands in the except below.
            file_name = file_queue.get(timeout=5)
            if file_name is None:
                break

            img_path = os.path.join(base_path, file_name)
            cap = cv2.VideoCapture(img_path)
            # print('Proc: {}, {}'.format(id, img_path))

            if WRITE_FILE:
                # Binary sidecar: one packed record per detected box.
                out_data_file_name = os.path.splitext(img_path)[0]+'.cd.dat'
                out_dat_file = open(out_data_file_name, 'wb')

            imgs = []
            frame_id = 0
            bbox_count = 0
            while True:
                grabbed, image_bgr = cap.read()

                if SHOW:
                    # Interactive mode: process one frame at a time.
                    if not grabbed:
                        break
                    imgs = [image_bgr]
                else:
                    # Batch mode: accumulate BATCH_SIZE frames before each
                    # inference call; a failed read flushes what is buffered.
                    if not grabbed:
                        if len(imgs) < 1:
                            break
                    else:
                        imgs.append(image_bgr)
                        if len(imgs) < BATCH_SIZE:
                            continue

                results = inference(model, imgs, device)

                for i, result in enumerate(results):
                    # print(i, imgs[i])
                    if result.size > 0:
                        # Only class 0 (Person) is evaluated here.
                        j = 0
                        bboxes = get_box(imgs[i], result, score_thr=THRESHOLDS[j], show=SHOW, color=COLORS[j])
                        if (bboxes is not None):
                            if WRITE_FILE:
                                # Record layout: frame_id, class, x1, y1, x2, y2
                                # as int32 plus one float32 field.
                                # NOTE(review): with the 6-column set-NMS boxes,
                                # bbox[4] is the int-cast score, not the float
                                # score at bbox[-1] -- verify which is intended.
                                for k, bbox in enumerate(bboxes):
                                    # bbox_info = '{},{},{},{},{},{},{}\n'.format(frame_id, j, bbox[0], bbox[1], bbox[2], bbox[3], bbox[4])
                                    dat = struct.pack('6i1f', frame_id, j, bbox[0], bbox[1], bbox[2], bbox[3], bbox[4])
                                    # dat_list = struct.unpack('6i1f', dat)
                                    # print(len(dat), dat_list)
                                    out_dat_file.write(dat)
                                    bbox_count += 1
                    frame_id += 1
                    if SHOW:
                        cv2.imshow(win_name, imgs[i])

                imgs.clear()
                if SHOW:
                    if 0 == SLEEP_TIME:
                        k = cv2.waitKey()
                    else:
                        k = cv2.waitKey(SLEEP_TIME)
                    if k == 27:  # Esc key to stop
                        is_break = True
                        break
                else:
                    if not grabbed:
                        break

            if WRITE_FILE:
                out_dat_file.close()
                out_queue.put((file_name, bbox_count, id))
                # Make the sidecar writable for other users of the shared mount.
                os.system('chmod a+wr {}'.format(out_data_file_name))
            if is_break:
                break

            count += 1
            # if count % 10 == 0:
            #     print('Proc:', id, 'File Count:', count)
    except Exception as e:
        # NOTE(review): reached on the normal queue timeout but also on any
        # real failure -- the exception itself is silently discarded.
        print('Proc:', id, 'File Count:', count, 'finish!')
    if SHOW:
        cv2.destroyAllWindows()


def main():
    """Dispatch every video named in the list file to the worker pool and
    collect per-video bbox counts into <out_path>/video_bbox_count.cd.txt.
    """
    # Read the work list: the first whitespace-separated token of each line
    # is a video file name relative to base_path.
    video_list = []
    with open(os.path.join(base_path, video_list_file), 'r') as file:
        for video_name in file.readlines():
            video_list.append(video_name.split()[0].strip())

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    if WRITE_FILE:
        out_file_name = os.path.join(out_path, 'video_bbox_count.cd.txt')
        out_file = open(out_file_name, 'w')

    file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()
    workers = []
    for i in range(args.proccess_count):
        workers.append(multiprocessing.Process(target=detect_proc, args=(file_queue, out_queue, i,)))

    for worker in workers:
        worker.start()

    for video_path in video_list:
        file_queue.put(video_path)
    # FIX: enqueue one shutdown sentinel per worker. detect_proc already
    # breaks on a None entry, so workers now terminate deterministically
    # instead of relying on their 5-second queue.get timeout to expire.
    for _ in range(args.proccess_count):
        file_queue.put(None)

    total_file_count = len(video_list)
    file_count = 0

    def drain_results(block_first):
        # Collect (file_name, bbox_count, worker_id) tuples until the queue
        # stays empty for 10 seconds (queue.Empty lands in the except below)
        # or a None sentinel arrives. block_first=True waits indefinitely for
        # the first result, preserving the original start-up behavior.
        nonlocal file_count
        wait_forever = block_first
        try:
            while True:
                if wait_forever:
                    wait_forever = False
                    file_info = out_queue.get(block=True)
                else:
                    file_info = out_queue.get(timeout=10)
                if file_info is None:
                    break
                if WRITE_FILE:
                    file_name, bbox_count, worker_id = file_info
                    out_file.write('{},{}\n'.format(file_name, bbox_count))
                    file_count += 1
                    print('Proc{}, File Count: {}/{}, {}, {}'.format(
                        worker_id, file_count, total_file_count, file_name, bbox_count))
        except Exception as e:
            # queue.Empty on timeout is the normal way this loop ends.
            print(e)

    drain_results(True)

    for worker in workers:
        worker.join()

    # Second pass picks up any results queued between the last timeout and join.
    drain_results(False)

    if WRITE_FILE:
        out_file.close()
        # Leave the summary writable for other users of the shared mount.
        os.system('chmod a+wr {}'.format(out_file_name))
    print('Finish!')


# Script entry point.
if __name__ == '__main__':
    main()
