import json
import os
from easydict import EasyDict

# import pytorch modules ..
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.utils.tensorboard as tensorboard    
torch.set_printoptions(precision=2)

# import other modules ..
import numpy as np
import cv2

# import self modules ..
from modules.retinaface.multiboxloss import MultiBoxLoss
from modules.retinaface.priorbox import PriorBox
from modules.retinaface.retinaface import RetinaFace
from utils.box import decode_boxes, decode_landmarks, py_cpu_nms

# <Class: FaceDetector/>
class FaceDetector(nn.Module):
    """RetinaFace-based face detector.

    Loads network weights and prior boxes for a training run described by an
    ``args.json`` file, then exposes :meth:`apply_image` to detect faces
    (boxes, confidence scores, and 5-point landmarks) in a BGR image.
    """

    def __init__(self, args_json="./args.json"):
        """Build the detector from a run's ``args.json`` configuration.

        Args:
            args_json: Path to the JSON file holding the run arguments
                (backbone name, prior-box settings, normalization stats, ...).
        """
        super().__init__()

        with open(args_json, "r") as fp:
            self._args = EasyDict(json.load(fp))

        self._device = "cuda:{0}".format(self._args.gpu) if torch.cuda.is_available() and self._args.gpu >= 0 else "cpu"

        # map_location keeps a GPU-saved checkpoint loadable on CPU-only hosts;
        # without it torch.load tries to restore tensors onto the saving device.
        checkpoint_path = os.path.join(self._args.checkpoints_folder, self._args.run_name, "best_valloss_checkpoint.pth.tar")
        checkpoint_dict = torch.load(checkpoint_path, map_location=self._device)

        # NOTE(review): "network_backone" (sic) matches the key stored in
        # args.json — do not "fix" the spelling here without migrating configs.
        self._net = RetinaFace(self._args.network_backone)
        self._net.load_state_dict(checkpoint_dict["state_dict"])
        self._net.to(self._device)
        self._net.eval()

        # Priors are fixed for a given image size; compute once and cache.
        priorbox = PriorBox(
            min_sizes=self._args.prior_min_sizes,
            steps=self._args.prior_steps,
            clip=self._args.prior_if_clip,
            image_size=(self._args.image_size, self._args.image_size),
        )
        with torch.no_grad():
            self._priors = priorbox.forward().to(self._device)

    def _pre_process(self, origin_image):
        """Resize, normalize, and convert a BGR uint8 image to a 1xCxHxW tensor."""
        dsize = (self._args.image_size, self._args.image_size)
        image = cv2.resize(origin_image, dsize, interpolation=cv2.INTER_LINEAR)
        image = image.astype(np.float32) / 255.0
        image -= self._args.normalize_mean
        image /= self._args.normalize_std
        image = image.transpose(2, 0, 1)  # HWC -> CHW
        tensor = torch.from_numpy(image).unsqueeze(0)  # add batch dimension
        return tensor.to(self._device)

    def _post_process(self, output_loc, output_conf, output_landm, origin_image, score_threshold, nms_iou_threshold):
        """Decode raw network outputs into image-space detections.

        Decodes prior-box regressions, rescales to the original image size,
        drops detections below ``score_threshold``, and applies NMS with
        ``nms_iou_threshold``.

        Returns:
            Tuple ``(boxes, scores, landmarks)`` as numpy arrays in
            original-image pixel coordinates.
        """
        # Scales map normalized [0, 1] coordinates back to original pixels.
        landmarks_scale = np.array([origin_image.shape[1], origin_image.shape[0]], np.float32)
        boxes_scale = np.array([origin_image.shape[1], origin_image.shape[0], origin_image.shape[1], origin_image.shape[0]], np.float32)

        decoded_boxes = decode_boxes(output_loc.squeeze(0), self._priors, self._args.loss_variance).cpu().detach().numpy() * boxes_scale
        # Column 1 of the softmax is the face-class probability.
        decoded_scores = F.softmax(output_conf, dim=-1).squeeze(0)[:, 1].cpu().detach().numpy()
        decoded_landmarks = decode_landmarks(output_landm.squeeze(0), self._priors, self._args.loss_variance).reshape(-1, 5, 2).cpu().detach().numpy() * landmarks_scale

        # Drop low-confidence detections before the (quadratic) NMS step.
        keep = np.where(decoded_scores > score_threshold)[0]
        filtered_scores = decoded_scores[keep]
        filtered_boxes = decoded_boxes[keep]
        filtered_landmarks = decoded_landmarks[keep]

        # NMS expects boxes concatenated with their scores: (N, 5).
        keep = py_cpu_nms(
            np.concatenate((filtered_boxes, filtered_scores.reshape(-1, 1)), 1),
            nms_iou_threshold,
        )
        return filtered_boxes[keep], filtered_scores[keep], filtered_landmarks[keep]

    def apply_image(self, image, score_threshold=0.5, nms_iou_threshold=0.5):
        """Detect faces in a BGR image.

        Args:
            image: BGR uint8 image of shape (H, W, 3), e.g. from ``cv2.imread``.
            score_threshold: Minimum confidence for a detection to be kept.
            nms_iou_threshold: IoU threshold for non-maximum suppression.

        Returns:
            Tuple ``(boxes, scores, landmarks)`` — boxes ``(N, 4)`` as
            ``x1, y1, x2, y2``, scores ``(N,)``, landmarks ``(N, 5, 2)``,
            all in original-image pixel coordinates.
        """
        # Inference only: no_grad avoids building the autograd graph.
        with torch.no_grad():
            tensor = self._pre_process(image)
            output_loc, output_conf, output_landm = self._net(tensor)
        return self._post_process(output_loc, output_conf, output_landm, image, score_threshold, nms_iou_threshold)

# <Class: /FaceDetector>

if __name__ == "__main__":
    detector = FaceDetector("./checkpoints/retinaface_2020_06_25_18_19_08/args.json")
    image = cv2.imread("data/wider_face/WIDER_test/images/12--Group/12_Group_Group_12_Group_Group_12_13.jpg", 1)
    boxes, scores, landmarks = detector.apply_image(image)

    # Draw the detections. OpenCV drawing functions require integer pixel
    # coordinates, while the detector returns float arrays — cast explicitly
    # (the original only cast for putText, which breaks on modern OpenCV).
    for box, score, landmark in zip(boxes, scores, landmarks):
        x1, y1, x2, y2 = (int(v) for v in box)
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
        cv2.putText(image, "{:.4f}".format(score), (x1, y1 + 12), cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 255))
        # One circle per facial landmark (5 points: eyes, nose, mouth corners
        # by RetinaFace convention — confirm against training labels).
        for px, py in landmark:
            cv2.circle(image, (int(px), int(py)), 1, (0, 0, 255), 2)
    # end-for
    cv2.imshow("image", image)
    cv2.waitKey(0)
