from __future__ import print_function

import logging as log
import os
import sys
from collections import defaultdict
from io import StringIO
import cv2
import numpy as np

import argparse
import os
import os.path as osp
import pickle
import shutil
import tempfile
import numpy as np
from tqdm import tqdm

import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint

from mmdet.core import coco_eval, results2json, wrap_fp16_model, multiclass_nms
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from mmdet.apis import init_detector, inference_detector, show_result

# Class index -> human-readable label for the two-class mask detector.
label_id_map = dict(enumerate(("nomask", "mask")))

def init(config_file=None, checkpoint_file=None, device='cuda:0'):
    """Initialize the mask-detection model.

    Args:
        config_file (str, optional): path to the mmdetection config file.
            Defaults to the development-host path used during training.
        checkpoint_file (str, optional): path to the trained checkpoint.
            Defaults to the development-host path used during training.
        device (str): device string forwarded to ``init_detector``.

    Returns:
        The detector handle produced by ``mmdet.apis.init_detector``.
    """
    # Keep the original hard-coded locations as defaults so existing
    # zero-argument callers keep working.
    if config_file is None:
        config_file = '/DATA/ybli/openvino_training_extensions/pytorch_toolkit/object_detection/configs/mask-detection-0105.py'
    if checkpoint_file is None:
        checkpoint_file = '/DATA/ybli/openvino_training_extensions/pytorch_toolkit/object_detection/output/mask/latest.pth'
    model = init_detector(config_file, checkpoint_file, device=device)
    return model


def process_image(model, input_image, thresh):
    """Run inference on ``input_image`` and return detections above ``thresh``.

    Args:
        model: detector handle returned by :func:`init`.
        input_image (numpy.ndarray): image to process, format (h, w, c), BGR.
        thresh (float): score threshold; detections scoring at or below it
            are dropped.

    Returns:
        list[dict] | None: one dict per detection with 'label' (str),
        'prob' (float) and 'box' (dict of int pixel coords) keys,
        or ``None`` when the arguments are invalid.
    """
    # ------------------------------- Prepare input -------------------------------------
    if not model or input_image is None:
        log.error('Invalid input args')
        return None

    # --------------------------- Performing inference ----------------------------------
    # init_detector() already leaves the model in eval mode, so no model.eval() here.
    results = inference_detector(model, input_image)
    # Mask-capable mmdet models return (bbox_result, segm_result); keep the boxes.
    bbox_result = results[0] if isinstance(results, tuple) else results

    # Stack per-class (n, 5) arrays of [x1, y1, x2, y2, score] and build a
    # parallel array of class indices.
    bboxes = np.vstack(bbox_result)
    labels = np.concatenate([
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ])
    keep = bboxes[:, -1] > thresh
    bboxes = bboxes[keep]
    labels = labels[keep]

    detect_objs = []
    for bbox, label in zip(bboxes, labels):
        # Cast to plain Python int/float so the result is JSON-serializable
        # (np.int32 / np.float32 are not).
        xmin, ymin, xmax, ymax = (int(v) for v in bbox[:4])
        detect_objs.append({
            'label': label_id_map[int(label)],
            'prob': float(bbox[4]),
            'box': {
                'xmin': xmin,
                'ymin': ymin,
                'xmax': xmax,
                'ymax': ymax
            }
        })
    return detect_objs

    
if __name__ == '__main__':
    # Smoke-test the Python API on a sample image.
    # Root logger defaults to WARNING, so log.info() would print nothing
    # without this configuration.
    log.basicConfig(level=log.INFO)
    img = cv2.imread('/DATA/ybli/MaskDetection/mask/JPEGImages/2007_000129.jpg')
    if img is None:
        # cv2.imread returns None (no exception) on a missing/unreadable file.
        log.error('Failed to read test image')
        sys.exit(1)
    predictor = init()
    result = process_image(predictor, img, 0.5)
    log.info(result)
