from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from Frcnn import _init_paths
import os
import numpy as np
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
from Frcnn.lib.model.utils.config import cfg, cfg_from_file, cfg_from_list
from Frcnn.lib.model.rpn.bbox_transform import clip_boxes
from Frcnn.lib.model.roi_layers import nms
from Frcnn.lib.model.rpn.bbox_transform import bbox_transform_inv
from Frcnn.lib.model.utils.net_utils import vis_detections
from Frcnn.lib.model.utils.blob import im_list_to_blob
from Frcnn.lib.model.faster_rcnn.vgg16 import vgg16
from Frcnn.lib.model.faster_rcnn.resnet import resnet

# Python 2/3 compatibility shim: `xrange` was removed in Python 3, so alias
# it to `range` when the builtin probe raises NameError.
try:
    xrange  # Python 2: the builtin exists, keep it as-is
except NameError:
    xrange = range  # Python 3: fall back to the (lazy) builtin range

class FRCNN(object):
    """Faster R-CNN single-image detector.

    Loads a pretrained checkpoint once in ``__init__`` and exposes
    :meth:`detectionImg` to run detection + box visualisation on one RGB
    image. Configuration (checkpoint identifiers, backbone, paths) is
    hard-coded as instance attributes set in ``__init__``.
    """

    def __init__(self):
        # Checkpoint name components: faster_rcnn_<session>_<epoch>_<checkpoint>.pth
        self.checkepoch = 31
        self.checkpoint = 1281
        self.checksession = 2
        self.cfg_file = "Frcnn/cfgs/res101.yml"
        self.dataset = "pascal_voc"
        self.load_dir = '/home/vision/Data/FrcnnModels'
        self.net = 'res101'
        self.set_cfgs = None
        self.class_agnostic = False
        self.cuda = True

        if self.cfg_file is not None:
            cfg_from_file(self.cfg_file)
        if self.set_cfgs is not None:
            cfg_from_list(self.set_cfgs)

        cfg.USE_GPU_NMS = self.cuda

        print('Using config:')
        pprint.pprint(cfg)
        np.random.seed(cfg.RNG_SEED)

        # Checkpoints live in <load_dir>/<net>/<dataset>/.
        input_dir = os.path.join(self.load_dir, self.net, self.dataset)
        if not os.path.exists(input_dir):
            raise Exception('There is no input directory for loading network from ' + input_dir)
        load_name = os.path.join(input_dir,
                                 'faster_rcnn_{}_{}_{}.pth'.format(self.checksession, self.checkepoch, self.checkpoint))

        # Class 0 is the background class; index j in the score tensor maps
        # to pascal_classes[j].
        self.pascal_classes = np.asarray(
            ['__background__', 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle',
             'awning-tricycle', 'bus', 'motor', 'vehicle'])

        # Build the requested backbone; fail fast on unknown names instead of
        # dropping into the debugger (the original called pdb.set_trace() and
        # then crashed later on the unset attribute).
        if self.net == 'vgg16':
            self.fasterRCNN = vgg16(self.pascal_classes, pretrained=False, class_agnostic=self.class_agnostic)
        elif self.net == 'res101':
            self.fasterRCNN = resnet(self.pascal_classes, 101, pretrained=False, class_agnostic=self.class_agnostic)
        elif self.net == 'res50':
            self.fasterRCNN = resnet(self.pascal_classes, 50, pretrained=False, class_agnostic=self.class_agnostic)
        elif self.net == 'res152':
            self.fasterRCNN = resnet(self.pascal_classes, 152, pretrained=False, class_agnostic=self.class_agnostic)
        else:
            raise ValueError("network is not defined: " + self.net)

        self.fasterRCNN.create_architecture()

        print("load checkpoint %s" % (load_name))
        if self.cuda:
            checkpoint = torch.load(load_name)
        else:
            # Remap GPU-saved tensors onto the CPU when CUDA is disabled.
            checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
        self.fasterRCNN.load_state_dict(checkpoint['model'])
        if 'pooling_mode' in checkpoint.keys():
            cfg.POOLING_MODE = checkpoint['pooling_mode']

        print('load model successfully!')

        # Reusable input holders, resized in-place per image in detectionImg.
        im_data = torch.FloatTensor(1)
        im_info = torch.FloatTensor(1)
        num_boxes = torch.LongTensor(1)
        gt_boxes = torch.FloatTensor(1)

        if self.cuda:
            im_data = im_data.cuda()
            im_info = im_info.cuda()
            num_boxes = num_boxes.cuda()
            gt_boxes = gt_boxes.cuda()

        # Variable wrappers kept for compatibility with the Frcnn code base;
        # gradients are never needed at inference time.
        with torch.no_grad():
            self.im_data = Variable(im_data)
            self.im_info = Variable(im_info)
            self.num_boxes = Variable(num_boxes)
            self.gt_boxes = Variable(gt_boxes)

        if self.cuda:
            cfg.CUDA = True
            self.fasterRCNN.cuda()

        self.fasterRCNN.eval()

        self.start = time.time()
        self.max_per_image = 100  # kept for API compatibility (not used below)
        self.thresh = 0.05        # minimum class score before NMS

    def _get_image_blob(self, im):
        """Convert a BGR image into a network input blob.

        Args:
            im: H x W x 3 BGR image array (any numeric dtype).

        Returns:
            Tuple ``(blob, scale_factors)`` where ``blob`` is an
            N x H x W x 3 float32 array (one entry per cfg.TEST.SCALES) and
            ``scale_factors`` is a numpy array of the resize scale used for
            each entry.
        """
        im_orig = im.astype(np.float32, copy=True)
        im_orig -= cfg.PIXEL_MEANS  # per-channel mean subtraction (BGR order)

        im_shape = im_orig.shape
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])

        processed_ims = []
        im_scale_factors = []

        for target_size in cfg.TEST.SCALES:
            # Scale the short side to target_size...
            im_scale = float(target_size) / float(im_size_min)
            # ...but prevent the biggest axis from exceeding MAX_SIZE.
            if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
                im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
            resized = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                                 interpolation=cv2.INTER_LINEAR)
            im_scale_factors.append(im_scale)
            processed_ims.append(resized)

        # Pad all scaled images into a single batched blob.
        blob = im_list_to_blob(processed_ims)

        return blob, np.array(im_scale_factors)

    def detectionImg(self, im_in):
        """Run detection on one image and draw the results.

        Args:
            im_in: H x W (grayscale) or H x W x 3 RGB image array.

        Returns:
            Tuple ``(im2showRGB, saveImage)``: the annotated RGB image and a
            flag that is True when at least one "vehicle" detection cleared
            the 0.5 drawing threshold.
        """
        saveImage = False
        # Promote grayscale to 3 channels so the network always sees HxWx3.
        if len(im_in.shape) == 2:
            im_in = im_in[:, :, np.newaxis]
            im_in = np.concatenate((im_in, im_in, im_in), axis=2)
        # rgb -> bgr (the network is trained on OpenCV-style BGR input)
        im = im_in[:, :, ::-1]

        blobs, im_scales = self._get_image_blob(im)
        assert len(im_scales) == 1, "Only single-image batch implemented"
        im_blob = blobs
        # im_info = (height, width, scale) of the blob.
        im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)

        im_data_pt = torch.from_numpy(im_blob)
        im_data_pt = im_data_pt.permute(0, 3, 1, 2)  # NHWC -> NCHW
        im_info_pt = torch.from_numpy(im_info_np)

        # Reuse the preallocated holders; gt_boxes/num_boxes are zeroed
        # dummies at test time.
        with torch.no_grad():
            self.im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
            self.im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
            self.gt_boxes.resize_(1, 1, 5).zero_()
            self.num_boxes.resize_(1).zero_()

        det_tic = time.time()

        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, \
        rois_label = self.fasterRCNN(self.im_data, self.im_info, self.gt_boxes, self.num_boxes)

        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]

        if cfg.TEST.BBOX_REG:
            # Apply bounding-box regression deltas.
            box_deltas = bbox_pred.data
            if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
                # Undo the target normalisation used during training. The
                # four duplicated cuda/cpu x agnostic/per-class branches of
                # the original collapse into one; only the final view differs.
                stds = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS)
                means = torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
                if self.cuda:
                    stds = stds.cuda()
                    means = means.cuda()
                box_deltas = box_deltas.view(-1, 4) * stds + means
                if self.class_agnostic:
                    box_deltas = box_deltas.view(1, -1, 4)
                else:
                    box_deltas = box_deltas.view(1, -1, 4 * len(self.pascal_classes))

            pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
            pred_boxes = clip_boxes(pred_boxes, self.im_info.data, 1)
        else:
            # Simply repeat the boxes, once for each class.
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))

        pred_boxes /= im_scales[0]  # map boxes back to original image coords

        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic  # retained for debugging/profiling
        misc_tic = time.time()
        im2show = np.copy(im)
        # Per-class score thresholding + NMS; index 0 is __background__.
        for j in xrange(1, len(self.pascal_classes)):
            inds = torch.nonzero(scores[:, j] > self.thresh, as_tuple=False).view(-1)
            if inds.numel() > 0:
                cls_scores = scores[:, j][inds]
                _, order = torch.sort(cls_scores, 0, True)
                if self.class_agnostic:
                    cls_boxes = pred_boxes[inds, :]
                else:
                    cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]

                cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
                cls_dets = cls_dets[order]
                keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
                cls_dets = cls_dets[keep.view(-1).long()]
                # vis_detections draws boxes scoring above 0.5 and reports
                # whether any were drawn for this class.
                im2show, Flag = vis_detections(im2show, self.pascal_classes[j], cls_dets.cpu().numpy(), 0.5)
                if self.pascal_classes[j] == "vehicle" and Flag:
                    saveImage = True
        misc_toc = time.time()
        nms_time = misc_toc - misc_tic  # retained for debugging/profiling
        im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)
        return im2showRGB, saveImage

if __name__ == "__main__":
    # Manual smoke test: run detection on one sample image and display it.
    sample = cv2.imread("/home/vision/Data/DataSet/Poscal_VOC/VOC2007/JPEGImages/000120.JPG")
    detector = FRCNN()
    annotated, vehicle_found = detector.detectionImg(sample)
    cv2.imshow("frcnn", annotated)
    cv2.waitKey(0)