import torch
from torch import nn
from torchvision import transforms

import numpy as np

import cv2
import math
from utils.NMS import myNMSnp as nms
from utils.NMS import myNMS as nms1
import time
import os
from face_align import face_align,warp_and_crop_face
from tqdm import tqdm

# Per-stage confidence thresholds: a candidate must score above these to
# survive P-Net / R-Net / O-Net respectively (progressively stricter).
P_CONF_THRESHOLD = 0.7
R_CONF_THRESHOLD = 0.9
O_CONF_THRESHOLD = 0.999
# Per-stage IoU thresholds used by NMS when merging overlapping candidates.
P_IOU_THRESHOLD = 0.5
R_IOU_THRESHOLD = 0.5
O_IOU_THRESHOLD = 0.2


class PNet(nn.Module):
    """MTCNN proposal network (P-Net).

    Fully convolutional: a 12x12 input patch collapses to a single 1x1
    feature cell, so arbitrary image sizes yield dense per-cell outputs.
    """

    def __init__(self):
        super().__init__()
        # Backbone: 12x12 receptive field -> 1x1 feature cell (32 channels).
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 10, 3, 1),
            nn.PReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(10, 16, 3, 1),
            nn.PReLU(),
            nn.Conv2d(16, 32, 3, 1),
            nn.PReLU(),
        )
        # 1x1 heads: face confidence and bounding-box offsets.
        self.conv4_1 = nn.Conv2d(32, 1, 1, 1)
        self.conv4_2 = nn.Conv2d(32, 4, 1, 1)

    def forward(self, x):
        """Return (confidence map, box-offset map, empty landmark placeholder).

        P-Net has no landmark head; the trailing empty list keeps the
        three-tuple shape consistent with R-Net / O-Net.
        """
        features = self.prelayer(x)
        confidence = torch.sigmoid(self.conv4_1(features))
        offsets = self.conv4_2(features)
        return confidence, offsets, []


class RNet(nn.Module):
    """MTCNN refinement network (R-Net) for 24x24 face candidates."""

    def __init__(self):
        super().__init__()
        # Convolutional backbone: 24x24x3 -> 3x3x64.
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 28, 3, 1),
            nn.MaxPool2d(2, 2),
            nn.PReLU(),
            nn.Conv2d(28, 48, 3, 1),
            nn.MaxPool2d(2, 2),
            nn.PReLU(),
            nn.Conv2d(48, 64, 2, 1),
            nn.PReLU(),
        )
        # Shared fully-connected embedding.
        self.outlayer = nn.Sequential(
            nn.Linear(3 * 3 * 64, 128),
            nn.PReLU(),
        )
        self.out_1 = nn.Linear(128, 1)   # face confidence
        self.out_2 = nn.Linear(128, 4)   # bounding-box regression
        self.out_3 = nn.Linear(128, 10)  # 5 facial landmarks as (x, y) pairs

    def forward(self, x):
        """Return (confidence, box offsets, landmark offsets) for a batch."""
        embedding = self.outlayer(self.prelayer(x).reshape(-1, 576))
        confidence = torch.sigmoid(self.out_1(embedding))
        box_offsets = self.out_2(embedding)
        landmarks = self.out_3(embedding)
        return confidence, box_offsets, landmarks


class ONet(nn.Module):
    """MTCNN output network (O-Net) for 48x48 face candidates."""

    def __init__(self):
        super().__init__()
        # Convolutional backbone: 48x48x3 -> 3x3x128 (no padding anywhere,
        # so the spatial size works out to 3x3 before the FC layers).
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 32, 3, 1),
            nn.MaxPool2d(2, 2),
            nn.PReLU(),
            nn.Conv2d(32, 64, 3, 1),
            nn.MaxPool2d(2, 2),
            nn.PReLU(),
            nn.Conv2d(64, 64, 3, 1),
            nn.MaxPool2d(2, 2),
            nn.PReLU(),
            nn.Conv2d(64, 128, 2, stride=1),
            nn.PReLU(),
        )
        # Shared fully-connected embedding.
        self.outlayer = nn.Sequential(
            nn.Linear(3 * 3 * 128, 256),
            nn.PReLU(),
        )
        self.out_1 = nn.Linear(256, 1)   # face confidence
        self.out_2 = nn.Linear(256, 4)   # bounding-box regression
        self.out_3 = nn.Linear(256, 10)  # 5 facial landmarks as (x, y) pairs

    def forward(self, x):
        """Return (confidence, box offsets, landmark offsets) for a batch."""
        embedding = self.outlayer(self.prelayer(x).reshape(-1, 1152))
        confidence = torch.sigmoid(self.out_1(embedding))
        box_offsets = self.out_2(embedding)
        landmarks = self.out_3(embedding)
        return confidence, box_offsets, landmarks


class MTCNN_DET():
    """MTCNN face detector, torch-tensor post-processing variant.

    Only the P-Net stage is wired into __call__ (it returns the NMS'd
    P-Net proposals); R-Net and O-Net are loaded but unused here. See
    MTCNN_DET_np for the complete three-stage cascade.
    """

    def __init__(self, use_gpu):
        """Build the three sub-networks and load their trained weights.

        use_gpu: move all networks to CUDA when True.
        """
        self.pnet = PNet()
        self.rnet = RNet()
        self.onet = ONet()

        # map_location='cpu' so GPU-trained checkpoints also load on
        # CPU-only hosts; .cuda() below moves them back when requested.
        self.pnet.load_state_dict(torch.load(r'mtcnn_weights\pnet.t', map_location='cpu'))
        self.rnet.load_state_dict(torch.load(r'mtcnn_weights\rnet.t', map_location='cpu'))
        self.onet.load_state_dict(torch.load(r'mtcnn_weights\onet.t', map_location='cpu'))
        self.use_gpu = use_gpu
        self.transfm = transforms.ToTensor()
        if use_gpu:
            self.pnet = self.pnet.cuda()
            self.rnet = self.rnet.cuda()
            self.onet = self.onet.cuda()

    # The input image must be RGB (H, W, 3) and not pre-normalized.
    def __call__(self, img):
        """Run the P-Net stage over an image pyramid.

        Returns the NMS-filtered P-Net proposals, or [] when no candidate
        passes the confidence threshold.
        """
        min_face_size = 20.0
        height, width = img.shape[0], img.shape[1]
        min_length = min(height, width)
        min_detection_size = 12
        factor = 0.707  # ~sqrt(0.5): halves the image area per pyramid level

        # Build pyramid scales so that P-Net's fixed 12-px window corresponds
        # to min_face_size pixels in the original image at the first level.
        scales = []
        m = min_detection_size / min_face_size
        min_length *= m
        factor_count = 0
        while min_length > min_detection_size:
            scales.append(m * factor ** factor_count)
            min_length *= factor
            factor_count += 1

        # Inference only: no_grad skips autograd bookkeeping for speed.
        with torch.no_grad():
            pnet_ret = []
            for s in scales:
                # BUGFIX: cv2.resize takes dsize as (width, height); the
                # arguments were swapped, which mis-sized non-square images.
                img_ = cv2.resize(img, (math.ceil(width * s), math.ceil(height * s)))
                img_ = self.transfm(img_)
                if self.use_gpu:
                    img_ = img_.cuda()

                ret = self.run_first_stage(img_, s, P_CONF_THRESHOLD)
                if ret.shape[0] != 0:
                    pnet_ret.append(ret)

            if not pnet_ret:
                print("no person in this image")
                return []
            # Stack the per-scale results into one (N, 7) tensor.
            pnet_ret = torch.cat(pnet_ret, dim=0)
            # Map feature-map cells back to original-image coordinates,
            # then apply the regressed box offsets.
            coords = self.get_coordinates(pnet_ret[:, 5:])
            pnet_ret[:, 1:5] = self.get_correctbox(pnet_ret[:, 1:5], coords)
            # NMS over the [conf, box] columns.
            p_out = nms1(pnet_ret[:, :5])
            return p_out

    def run_first_stage(self, img, scale, threshold):
        """Run P-Net on one pyramid level.

        Returns an (N, 7) tensor with rows
        [confidence, 4 box offsets, scale, cell_row, cell_col]
        for every feature-map cell whose confidence exceeds threshold.
        """
        conf, box, _ = self.pnet(img.unsqueeze_(dim=0))
        conf = conf[0][0].cpu().detach()
        box = box[0].cpu().detach()
        idxs = torch.nonzero(torch.gt(conf, threshold), as_tuple=False)

        conf = conf[idxs[:, 0], idxs[:, 1]].reshape(-1, 1)
        offset = box[:, idxs[:, 0], idxs[:, 1]].t()  # (4, N) -> (N, 4)
        scale = torch.ones_like(conf).reshape(-1, 1) * scale

        return torch.cat([conf, offset, scale, idxs.float()], dim=1)

    def get_coordinates(self, idxs, featuremap_size=12):
        """Map P-Net feature-map cells back to image-space windows.

        idxs: (N, 3) rows [scale, cell_row, cell_col]. P-Net has an overall
        stride of 2 and a 12-px receptive field, so a cell corresponds to a
        12x12 window at (row*2, col*2) in the scaled image; dividing by the
        scale returns it to original-image coordinates. Returns (N, 4).

        NOTE(review): cells are (row, col) = (y, x), so the boxes come out
        ordered (y1, x1, y2, x2) while downstream code indexes boxes as
        (x1, y1, x2, y2) -- confirm the intended coordinate order.
        """
        pts0 = torch.div(idxs[:, 1:] * 2, idxs[:, [0]])
        pts1 = torch.div(idxs[:, 1:] * 2 + featuremap_size, idxs[:, [0]])

        return torch.cat([pts0, pts1], dim=1)

    def get_correctbox(self, offset, coordinates):
        """Apply regressed offsets (fractions of the window size) to the
        anchor windows; returns the corrected boxes, same shape as input."""
        h = coordinates[:, [2]] - coordinates[:, [0]]
        w = coordinates[:, [3]] - coordinates[:, [1]]

        wh = torch.cat([h, w, h, w], dim=1)

        return offset * wh + coordinates


class MTCNN_DET_np():
    """Full three-stage MTCNN cascade (P-Net -> R-Net -> O-Net).

    Box/landmark post-processing is done in NumPy. Input images must be
    RGB (H, W, 3) arrays without prior normalization.
    """

    def __init__(self, use_gpu):
        """Build the three sub-networks and load their trained weights.

        use_gpu: move all networks to CUDA when True.
        """
        self.pnet = PNet()
        self.rnet = RNet()
        self.onet = ONet()

        # map_location='cpu' so GPU-trained checkpoints also load on
        # CPU-only hosts; .cuda() below moves them back when requested.
        self.pnet.load_state_dict(torch.load(r'mtcnn_weights\pnet.t', map_location='cpu'))
        self.rnet.load_state_dict(torch.load(r'mtcnn_weights\rnet.t', map_location='cpu'))
        self.onet.load_state_dict(torch.load(r'mtcnn_weights\onet.t', map_location='cpu'))
        self.use_gpu = use_gpu
        self.transfm = transforms.ToTensor()
        if use_gpu:
            self.pnet = self.pnet.cuda()
            self.rnet = self.rnet.cuda()
            self.onet = self.onet.cuda()

    # The input image must be RGB (H, W, 3) and not pre-normalized.
    def __call__(self, img):
        """Detect faces in an RGB image.

        Returns a list of rows [conf, x1, y1, x2, y2, 10 landmark coords],
        or [] when no face survives all three stages.
        """
        min_face_size = 20.0
        height, width = img.shape[0], img.shape[1]
        min_length = min(height, width)
        min_detection_size = 12
        factor = 0.707  # ~sqrt(0.5): halves the image area per pyramid level

        # Build pyramid scales so that P-Net's fixed 12-px window corresponds
        # to min_face_size pixels in the original image at the first level.
        scales = []
        m = min_detection_size / min_face_size
        min_length *= m
        factor_count = 0
        while min_length > min_detection_size:
            scales.append(m * factor ** factor_count)
            min_length *= factor
            factor_count += 1

        # Inference only: no_grad skips autograd bookkeeping for speed.
        with torch.no_grad():
            pnet_images = []
            for s in scales:
                height_ = math.ceil(height * s)
                width_ = math.ceil(width * s)
                # BUGFIX: cv2.resize takes dsize as (width, height); the
                # arguments were swapped, which made the canvas paste below
                # fail for non-square images.
                img_ = cv2.resize(img, (width_, height_))

                if s == 1.0:
                    # BUGFIX: the full-scale level was appended as a raw
                    # ndarray, which would break torch.stack below; convert
                    # it like every other level.
                    pnet_images.append(self.transfm(img_))
                else:
                    # Paste the scaled image onto a full-size black canvas
                    # so every pyramid level has the same tensor shape and
                    # the whole pyramid can be batched through P-Net at once.
                    back_img = np.zeros_like(img)
                    back_img[0:height_, 0:width_] = img_
                    pnet_images.append(self.transfm(back_img))
            pnet_images = torch.stack(pnet_images, dim=0)

            if self.use_gpu:
                pnet_images = pnet_images.cuda()
            # Batched P-Net inference over the whole pyramid.
            pnet_ret = self.run_first_stage_detn(pnet_images, np.array(scales), P_CONF_THRESHOLD)

            # BUGFIX: was `pnet_ret == torch.tensor(0)`, an elementwise
            # comparison on a NumPy array; test for zero rows instead.
            if pnet_ret.shape[0] == 0:
                print("no person in this image")
                return []
            # Map P-Net feature-map cells back to original-image coordinates,
            # then apply the regressed box offsets and run NMS.
            coords = self.get_coordinates(pnet_ret[:, 5:])
            pnet_ret[:, 1:5] = self.get_correctbox(pnet_ret[:, 1:5], coords)

            p_out = nms(pnet_ret[:, :5], P_IOU_THRESHOLD)

            if p_out.shape[0] == 0:
                print("no images in pnet")
                return []
            # R-Net refinement stage.
            try:
                cord_boxes = self.convert_to_square(p_out[:, 1:])  # R-Net input boxes

                img_boxes, cord_boxes = self.get_image_boxes(img, cord_boxes)
                r_out = self.net_pred(self.rnet, img_boxes, cord_boxes, R_CONF_THRESHOLD)
                r_out = nms(r_out, R_IOU_THRESHOLD)

                if r_out.shape[0] == 0:
                    print("no images in rnet")
                    return []
            except Exception:  # BUGFIX: bare except also swallowed KeyboardInterrupt
                print("wrong output in rnetdet")
                return []
            # O-Net final stage.
            try:
                cord_boxes = self.convert_to_square(r_out[:, 1:5])  # O-Net input boxes
                img_boxes, cord_boxes = self.get_image_boxes(img, cord_boxes, 48)
                o_out = self.net_pred(self.onet, img_boxes, cord_boxes, O_CONF_THRESHOLD)
                o_out = nms(o_out, O_IOU_THRESHOLD)
                if o_out.shape[0] == 0:
                    # BUGFIX: message previously said "pnet" (copy-paste).
                    print("no images in onet")
                    return []
            except Exception:  # BUGFIX: bare except also swallowed KeyboardInterrupt
                print("wrong output in onetdet")
                return []

            return o_out.tolist()

    def get_image_boxes(self, img, boxes, size=24):
        """Crop each candidate box out of img and resize it to (size, size).

        Boxes that stick out of the image are skipped. Returns a stacked
        (N, 3, size, size) tensor of crops plus the integer boxes actually
        used, or [] when nothing valid remains (callers catch the resulting
        unpack error).
        """
        ret_images = []
        new_boxes = []
        w, h = img.shape[1], img.shape[0]
        for box in boxes:
            # BUGFIX: np.int was removed from NumPy; use the builtin int.
            box = box.astype(int).tolist()
            # Skip boxes that fall outside the image.
            if box[1] < 0 or box[0] < 0 or box[2] > w or box[3] > h:
                continue

            # Crop a square with the longer side so aspect ratio is kept.
            # NOTE(review): the square crop can still run past the right or
            # bottom edge, where NumPy silently clamps the slice -- the crop
            # is then not square before the resize; confirm this is intended.
            maxside = max(box[3] - box[1], box[2] - box[0])
            img_slice = img[box[1]:box[1] + maxside, box[0]:box[0] + maxside]

            img_slice = cv2.resize(img_slice, (size, size), interpolation=cv2.INTER_NEAREST)

            box[3] = box[1] + maxside
            box[2] = box[0] + maxside
            new_boxes.append(box)
            # Convert the crop to a CHW float tensor (ToTensor).
            ret_images.append(self.transfm(img_slice))

        if not ret_images:
            print("wrong input")
            return []
        return torch.stack(ret_images, dim=0), np.array(new_boxes)

    def net_pred(self, net, imges, bboxes, threshold):
        """Run R-Net/O-Net on cropped candidates and decode the outputs.

        Returns (M, 15) rows [conf, 4 box coords, 10 landmark coords] for
        candidates whose confidence exceeds threshold. Offsets are decoded
        as fractions of each (square) input box's side length.
        """
        if self.use_gpu:
            imges = imges.cuda()

        conf, box, landmark = net(imges)

        conf = conf.cpu().detach().numpy()
        box = box.cpu().detach().numpy()
        landmark = landmark.cpu().detach().numpy()
        # Side length of each (square) candidate box.
        w = bboxes[:, [2]] - bboxes[:, [0]]

        # Regressed box corners.
        box = bboxes + box * w
        # Landmarks: offsets from the box's top-left corner.
        landmark[:, 0::2] = bboxes[:, [0]] + w * landmark[:, ::2]
        landmark[:, 1::2] = bboxes[:, [1]] + w * landmark[:, 1::2]

        ret_items = np.concatenate([conf, box, landmark], axis=1)
        # Keep only candidates above the confidence threshold.
        return ret_items[ret_items[:, 0] > threshold]

    def convert_to_square(self, bboxes):
        """Convert bounding boxes to a square form.

        Arguments:
            bboxes: a float numpy array of shape [n, 5].

        Returns:
            a float numpy array of shape [n, 5],
                squared bounding boxes (expanded around the box center).
        """

        square_bboxes = np.zeros_like(bboxes)
        x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]
        h = y2 - y1 + 1.0
        w = x2 - x1 + 1.0
        max_side = np.maximum(h, w)
        square_bboxes[:, 0] = x1 + w * 0.5 - max_side * 0.5
        square_bboxes[:, 1] = y1 + h * 0.5 - max_side * 0.5
        square_bboxes[:, 2] = square_bboxes[:, 0] + max_side - 1.0
        square_bboxes[:, 3] = square_bboxes[:, 1] + max_side - 1.0
        return square_bboxes

    def run_first_stage(self, img, scale, threshold):
        """Single-image P-Net pass with NumPy post-processing.

        Superseded by run_first_stage_detn (batched); kept for reference.
        Returns (N, 7) rows [conf, 4 offsets, scale, cell_row, cell_col].
        """
        conf, box, _ = self.pnet(img.unsqueeze_(dim=0))
        conf = conf[0][0].cpu().detach().numpy()
        box = box[0].cpu().detach().numpy()
        idxs = np.where(conf > threshold)
        idxs = np.vstack(idxs).T

        conf = conf[idxs[:, 0], idxs[:, 1]].reshape(-1, 1)
        offset = box[:, idxs[:, 0], idxs[:, 1]].T  # (4, N) -> (N, 4)
        scale = np.ones_like(conf).reshape(-1, 1) * scale
        # BUGFIX: np.float was removed from NumPy; builtin float is float64.
        return np.concatenate([conf, offset, scale, idxs.astype(float)], axis=1)

    def run_first_stage_detn(self, imges, scales, threshold):
        """Batched P-Net pass over the whole padded pyramid.

        imges: (S, 3, H, W) tensor, one level per pyramid scale.
        scales: (S,) array with the matching scale factors.
        Returns (N, 7) rows [conf, 4 offsets, scale, cell_row, cell_col].
        """
        conf, box, _ = self.pnet(imges)
        conf = conf[:, 0].cpu().detach().numpy()
        box = box.cpu().detach().numpy()
        idxs = np.where(conf > threshold)
        idxs = np.vstack(idxs).T  # (N, 3): [level, row, col]

        conf = conf[idxs[..., 0], idxs[..., 1], idxs[..., 2]].reshape(-1, 1)
        # BUGFIX: with a slice between the advanced indices, NumPy already
        # puts the broadcast dimension first, i.e. the result is (N, 4);
        # the previous `.reshape(4, -1).T` scrambled offsets across rows.
        offset = box[idxs[..., 0], :, idxs[..., 1], idxs[..., 2]]
        scale = scales[idxs[..., 0]].reshape(-1, 1)

        # BUGFIX: np.float was removed from NumPy; builtin float is float64.
        return np.concatenate([conf, offset, scale, idxs[..., 1:].astype(float)], axis=1)

    def get_coordinates(self, idxs, featuremap_size=12):
        """Map P-Net feature-map cells back to image-space windows.

        idxs: (N, 3) rows [scale, cell_row, cell_col]. P-Net has an overall
        stride of 2 and a 12-px receptive field; dividing by the scale maps
        the window back to original-image coordinates. Returns (N, 4).

        NOTE(review): cells are (row, col) = (y, x), so boxes come out as
        (y1, x1, y2, x2) while downstream code reads (x1, y1, x2, y2) --
        confirm the intended coordinate order.
        """
        pts0 = idxs[:, 1:] * 2 / idxs[:, [0]]
        pts1 = (idxs[:, 1:] * 2 + featuremap_size) / idxs[:, [0]]
        return np.concatenate((pts0, pts1), axis=1)

    def get_correctbox(self, offset, coordinates):
        """Apply regressed offsets (fractions of the window size) to the
        anchor windows; returns the corrected boxes, same shape as input."""
        h = coordinates[:, [2]] - coordinates[:, [0]]
        w = coordinates[:, [3]] - coordinates[:, [1]]
        wh = np.concatenate([h, w, h, w], axis=1)
        return offset * wh + coordinates


def detone():
    """Detect faces in one test image and draw the resulting boxes."""
    detector = MTCNN_DET_np(True)
    bgr = cv2.imread('test_img/013.jpg')
    # The detector expects RGB input.
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    boxes = detector(rgb)

    print(len(boxes))
    # One rectangle per detected face; box layout is [conf, x1, y1, x2, y2, ...].
    for box in boxes:
        x1, y1, x2, y2 = (int(v) for v in box[1:5])
        print(box, "w h", x2 - x1, y2 - y1)
        bgr = cv2.rectangle(bgr, (x1, y1), (x2, y2), (0, 0, 255), thickness=1)

    cv2.imshow("img", bgr)
    cv2.waitKey(0)


def Detimges():
    """Run detection over one CASIA-WebFace identity folder and display the
    aligned crop for every image with exactly one detected face."""
    detector = MTCNN_DET_np(True)

    imgdir = r"e:\CASIA-WebFace\0000045"

    for imgname in os.listdir(imgdir):
        imgpath = fr"{imgdir}\{imgname}"
        print("-----------")
        print(imgpath)
        bgr = cv2.imread(imgpath)

        # The detector expects RGB input.
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

        boxes = detector(rgb)
        # Only align when exactly one face was found.
        if len(boxes) == 1:
            landmark = np.array(boxes[0][5:], dtype=np.float32).reshape(-1, 2)

            alignface = face_align(landmark, bgr)
            print(alignface.shape)
            cv2.imshow("alignface", alignface)
            cv2.waitKey(0)


def gen_casia_webface_sample():
    """Build an aligned-face dataset from CASIA-WebFace.

    For every identity folder, each image in which exactly one face is
    detected is aligned and written under a numbered output folder.
    """
    detector = MTCNN_DET_np(True)
    read_root = r"e:\CASIA-WebFace"
    sa_root = r"e:\mysample"
    sa_index = 0

    if not os.path.exists(sa_root):
        os.mkdir(sa_root)

    for read_folder in tqdm(os.listdir(read_root)):
        read_folder_path = fr"{read_root}/{read_folder}"
        sa_cnt = 0
        for i, img_name in enumerate(os.listdir(read_folder_path)):
            img = cv2.imread(fr"{read_folder_path}/{img_name}")
            # The detector expects RGB input.
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            boxes = detector(rgb)
            # Save only unambiguous single-face images.
            if len(boxes) == 1:
                landmark = np.array(boxes[0][5:], dtype=np.float32).reshape(-1, 2)
                alignface = face_align(landmark, img)
                if not os.path.exists(fr"{sa_root}\{sa_index}"):
                    os.mkdir(fr"{sa_root}\{sa_index}")
                cv2.imwrite(fr"{sa_root}\{sa_index}\{i}.png", alignface)
                sa_cnt += 1
        # Advance to a fresh output folder only when this identity actually
        # produced images; otherwise the index is reused.
        if sa_cnt != 0:
            sa_index += 1


# MTCNN-based face detection plus alignment; intended for saving crops.
def face_det_align(model, image):
    """Detect and align every face in a BGR image with the given detector.

    Returns (aligned_faces, confidences) as parallel lists, or None when
    the inputs are invalid or no face is found. (Saving to disk is not
    implemented yet.)
    """
    if model is None:
        print("wrong model input in face_det_align")
        return None
    if image is None:
        print("wrong image_path input in face_det_align", image)
        return None

    # The detector expects RGB input.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    boxes = model(rgb)

    if not boxes:
        print("no person in this image")
        return None

    out_faces = []
    out_confs = []
    for box in boxes:
        landmark = np.array(box[5:], dtype=np.float32).reshape(-1, 2)
        out_faces.append(face_align(landmark, image))
        out_confs.append(box[0])

    return out_faces, out_confs


# Entry point: runs the single-image demo by default; swap the commented
# calls to run folder detection or dataset generation instead.
if __name__ == '__main__':
    detone()
    # Detimges()
    # gen_casia_webface_sample()
