import torch
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import utils
import model
from torchvision import transforms
import time
import os
import cv2
from torchvision import datasets, models, transforms
# Preprocessing pipeline for the (currently commented-out) fake-face VGG16
# classifier in __main__: augmentation crop/flip, then ImageNet-statistics
# normalization.  Not used by the MTCNN Detector below.
data_transforms = transforms.Compose(
    [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(
            [0.485, 0.456, 0.406],
            [0.229, 0.224, 0.225],
        ),
    ]
)

class Detector:
    """Cascaded MTCNN face detector (P-Net -> R-Net -> O-Net).

    P-Net proposes candidate windows over an image pyramid, R-Net refines
    them on 24x24 crops, O-Net produces the final boxes from 48x48 crops.
    Stages exchange boxes as float ndarrays of shape (N, 5):
    [x1, y1, x2, y2, confidence]; an empty array means "no faces".
    """

    def __init__(self, pnet_param="./param/p_net.pth", rnet_param="./param/r_net.pth", onet_param="./param/o_net.pth",
                 isCuda=False):
        """Load the three checkpoints and put the networks in eval mode.

        Args:
            pnet_param/rnet_param/onet_param: paths to the state-dict files.
            isCuda: kept for backward compatibility; the device is chosen
                automatically from torch.cuda.is_available().
        """
        self.isCuda = isCuda
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.pnet = model.PNet().to(self.device)
        self.rnet = model.RNet().to(self.device)
        self.onet = model.ONet().to(self.device)

        # BUGFIX: map_location was hard-coded to 'cuda', which raised on
        # CPU-only machines.  Map to whatever device the models live on.
        self.pnet.load_state_dict(torch.load(pnet_param, map_location=self.device))
        self.rnet.load_state_dict(torch.load(rnet_param, map_location=self.device))
        self.onet.load_state_dict(torch.load(onet_param, map_location=self.device))

        self.pnet.eval()
        self.rnet.eval()
        self.onet.eval()

        # MTCNN convention: scale pixel values to [-1, 1].
        self.__image_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

    def detect(self, image):
        """Run the full cascade on a PIL image.

        Returns an (N, 5) ndarray [x1, y1, x2, y2, conf] or an empty array
        when any stage produces no candidates.
        """
        pnet_boxes = self.__pnet_detect(image)
        if pnet_boxes.shape[0] == 0:
            return np.array([])

        rnet_boxes = self.__rnet_detect(image, pnet_boxes)
        if rnet_boxes.shape[0] == 0:
            return np.array([])

        onet_boxes = self.__onet_detect(image, rnet_boxes)
        if onet_boxes.shape[0] == 0:
            return np.array([])

        return onet_boxes

    def __pnet_detect(self, img, stride=2, side_len=12):
        """Slide P-Net over an image pyramid and collect candidate boxes.

        stride/side_len mirror P-Net's receptive-field geometry: each output
        cell corresponds to a 12x12 window with stride 2 in its input.
        """
        boxes = []
        w, h = img.size
        min_side_len = min(w, h)
        scale = 1

        # Stop once the shorter side drops below 60 px (raised from the
        # canonical 12 to skip tiny faces and save pyramid levels).
        while min_side_len >= 60:
            img_data = self.__image_transform(img).unsqueeze(0)
            # BUGFIX: always move the input to the models' device.  The
            # original only called .cuda() when isCuda was set, which crashed
            # when CUDA was available (models on GPU) but isCuda was False.
            img_data = img_data.to(self.device)

            _cls, _offset = self.pnet(img_data)  # NCHW
            cls, offset = _cls[0][0].cpu().data, _offset[0].cpu().data

            # Output cells whose face confidence exceeds 0.6.
            idxs = torch.nonzero(torch.gt(cls, 0.6), as_tuple=False)

            # Map output-cell indices back to original-image coordinates.
            _x1 = torch.div(idxs[:, 1] * stride, scale, rounding_mode="floor")
            _y1 = torch.div(idxs[:, 0] * stride, scale, rounding_mode="floor")
            _x2 = _x1 + side_len // scale
            _y2 = _y1 + side_len // scale

            _w = _x2 - _x1
            _h = _y2 - _y1
            # Apply the per-cell regression offsets to the anchor window.
            x1 = offset[0, idxs[:, 0], idxs[:, 1]] * _w + _x1
            y1 = offset[1, idxs[:, 0], idxs[:, 1]] * _h + _y1
            x2 = offset[2, idxs[:, 0], idxs[:, 1]] * _w + _x2
            y2 = offset[3, idxs[:, 0], idxs[:, 1]] * _h + _y2
            conf = cls[idxs[:, 0], idxs[:, 1]]
            boxes.extend(torch.stack([x1, y1, x2, y2, conf], dim=1).numpy())

            # Next pyramid level: shrink by the canonical MTCNN factor.
            scale *= 0.709
            new_w = int(w * scale)
            new_h = int(h * scale)
            img = img.resize((new_w, new_h))
            min_side_len = min(new_w, new_h)

        # ROBUSTNESS: do not hand an empty candidate list to nms.
        if not boxes:
            return np.array([])
        return utils.nms(np.array(boxes), 0.3)

    def __rnet_detect(self, image, pnet_boxes):
        """Refine P-Net proposals on 24x24 crops; keep scores > 0.7."""
        # Expand proposals to squares so the resize does not distort faces.
        _pnet_boxes = utils.convert_to_square(pnet_boxes)

        _box = _pnet_boxes[:, 0:4]
        _img_dataset = [self.__image_transform(image.crop(x).resize((24, 24))) for x in _box]
        img_dataset = torch.stack(_img_dataset).to(self.device)

        _cls, _offset = self.rnet(img_dataset)
        _cls = _cls.cpu().data.numpy()
        offset = _offset.cpu().data.numpy()

        idxs, _ = np.where(_cls > 0.7)
        # ROBUSTNESS: nothing survived the confidence cut.
        if idxs.size == 0:
            return np.array([])

        kept = _pnet_boxes[idxs]
        _x1, _y1, _x2, _y2 = kept[:, 0], kept[:, 1], kept[:, 2], kept[:, 3]
        ow = _x2 - _x1
        oh = _y2 - _y1

        # Regress the square boxes with R-Net's predicted offsets.
        x1 = _x1 + ow * offset[idxs, 0]
        y1 = _y1 + oh * offset[idxs, 1]
        x2 = _x2 + ow * offset[idxs, 2]
        y2 = _y2 + oh * offset[idxs, 3]
        conf = _cls[idxs, 0]

        return utils.nms(np.stack([x1, y1, x2, y2, conf], axis=1), 0.3)

    def __onet_detect(self, image, rnet_boxes):
        """Final refinement on 48x48 crops; keep scores > 0.99, min-area NMS."""
        _rnet_boxes = utils.convert_to_square(rnet_boxes)

        _box = _rnet_boxes[:, 0:4]
        _img_dataset = [self.__image_transform(image.crop(x).resize((48, 48))) for x in _box]
        img_dataset = torch.stack(_img_dataset).to(self.device)

        _cls, _offset = self.onet(img_dataset)
        _cls = _cls.cpu().data.numpy()
        offset = _offset.cpu().data.numpy()

        # O-Net is the last gate, so the threshold is much stricter.
        idxs, _ = np.where(_cls > 0.99)
        # ROBUSTNESS: nothing survived the confidence cut.
        if idxs.size == 0:
            return np.array([])

        kept = _rnet_boxes[idxs]
        _x1, _y1, _x2, _y2 = kept[:, 0], kept[:, 1], kept[:, 2], kept[:, 3]
        ow = _x2 - _x1
        oh = _y2 - _y1

        x1 = _x1 + ow * offset[idxs, 0]
        y1 = _y1 + oh * offset[idxs, 1]
        x2 = _x2 + ow * offset[idxs, 2]
        y2 = _y2 + oh * offset[idxs, 3]
        conf = _cls[idxs, 0]

        # isMin=True: IoU uses the smaller box's area, suppressing boxes
        # nested inside larger ones.
        return utils.nms(np.stack([x1, y1, x2, y2, conf], axis=1), 0.3, isMin=True)



if __name__ == '__main__':
    # Webcam capture; request ~500x500 frames (the driver may ignore this).
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)

    # BUGFIX: build the detector ONCE.  The original constructed a new
    # Detector (reloading all three checkpoints from disk) on every frame.
    detector = Detector(isCuda=True)

    # NOTE: the fake-face VGG16 classifier that used data_transforms was
    # disabled here; re-add it around the detection call if needed.

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if cv2.waitKey(30) & 0xFF == ord("q"):
            break

        # The detector works on RGB PIL images; OpenCV frames are BGR.
        rgb_image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

        with torch.no_grad():  # inference only - skip autograd bookkeeping
            boxes = detector.detect(rgb_image)

        # BUGFIX: draw directly on the original BGR frame.  The original
        # converted color spaces inside the box loop (corrupting channels
        # whenever more than one face was found) and finally showed an RGB
        # array via cv2.imshow, which expects BGR.
        for box in boxes:
            x1, y1, x2, y2 = (int(v) for v in box[:4])
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)

        cv2.imshow("", frame)

    # Release the camera and close the preview window on exit.
    cap.release()
    cv2.destroyAllWindows()