import base64
import copy
import os
from utils import img_arr_to_b64, __factor__
import dlib

import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
import time

from utils.nets.MobileNetV2_unet import MobileNetV2_unet

BASEDIR = os.getcwd()
# Model artifact locations, resolved relative to the current working directory.
modelPath = os.path.join(BASEDIR, 'models', 'model.pt')
mobileModelPath = os.path.join(BASEDIR, 'models', 'mobilenet_v2.pth.tar')
cnn_face_model = os.path.join(BASEDIR, 'models', "mmod_human_face_detector.dat")
useGPU = False
if useGPU:
    # torch has no "gpu" device type; the CUDA backend is selected with
    # "cuda" (torch.device("gpu") raises RuntimeError).
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


def localEqualHist(image):
    """Apply CLAHE (local adaptive histogram equalization) to each channel
    of a BGR image independently and return the re-merged result."""
    equalizer = cv2.createCLAHE(clipLimit=5, tileGridSize=(7, 7))
    # Equalize every channel in its original order so B/G/R stay aligned.
    channels = cv2.split(image)
    equalized = [equalizer.apply(channel) for channel in channels]
    return cv2.merge(equalized)


def load_model():
    """Build the MobileNetV2 U-Net segmentation network and load its
    trained weights.

    Weights are deserialized onto the CPU first, then the network is moved
    to the configured global ``device`` and put into eval mode.
    """
    net = MobileNetV2_unet(mobileModelPath, device=str(device)).to(device)
    weights = torch.load(modelPath, map_location='cpu')
    net.load_state_dict(weights)
    net.eval()
    return net


def removeBlankColumns(img: np.ndarray) -> np.ndarray:
    """Drop every column of *img* that is entirely zero.

    Works for 2-D grayscale and 3-D multi-channel arrays. The original
    implementation reduced only over axis 0, which for an (H, W, C) image
    produced (column, channel) index pairs; np.delete flattened those pairs,
    so channel indices 0..C-1 were spuriously deleted as column indices.
    Here we reduce over every axis except the column axis (1).
    """
    reduce_axes = tuple(ax for ax in range(img.ndim) if ax != 1)
    blank = np.all(img == 0, axis=reduce_axes)
    return img[:, ~blank]


def removeBlankRows(img: np.ndarray) -> np.ndarray:
    """Drop every row of *img* that is entirely zero.

    Works for 2-D grayscale and 3-D multi-channel arrays. The original
    implementation reduced only over axis 1, which for an (H, W, C) image
    produced (row, channel) index pairs; np.delete flattened those pairs,
    so channel indices 0..C-1 were spuriously deleted as row indices.
    Here we reduce over every axis except the row axis (0).
    """
    reduce_axes = tuple(ax for ax in range(img.ndim) if ax != 0)
    blank = np.all(img == 0, axis=reduce_axes)
    return img[~blank]


def generateMask(n: np.ndarray, number: int, axis: int):
    """Tile a 1-D non-negative vector into a 2-D binary mask.

    Positive entries of ``n`` become 1, zeros stay 0. With ``axis == 0``
    the vector becomes a row repeated ``number`` times, shape
    (number, len(n)); otherwise it becomes a column repeated ``number``
    times, shape (len(n), number).

    Unlike the original implementation, the input array is NOT modified
    in place (the old ``n[n > 0] = 1`` mutated the caller's array).
    """
    binary = (n > 0).astype(n.dtype)
    if axis == 0:
        return np.tile(binary, (number, 1))
    return np.tile(binary.reshape(len(binary), 1), (1, number))


def getSegmentedFace(imgPath: str):
    """Segment the face region out of an image file.

    The image is resized to 224x224, run through the MobileNetV2 U-Net
    segmenter, and the resulting mask is used to zero out the background;
    blank rows/columns are then cropped away.

    :param imgPath: path of the image to segment.
    :return: cropped BGR ndarray of the masked face region, or the original
             image unchanged when the model produces an empty mask.
    :raises FileNotFoundError: if the image cannot be read.
    """
    model = load_model()
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])

    image = cv2.imread(imgPath)
    if image is None:
        # cv2.imread signals failure by returning None; fail loudly instead
        # of crashing later with an opaque AttributeError on .shape.
        raise FileNotFoundError("could not read image: {}".format(imgPath))
    oriImgShape = image.shape
    oriImage = copy.deepcopy(image)
    # Model expects RGB; cv2 loads BGR.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pil_img = Image.fromarray(image)
    torch_img = transform(pil_img).unsqueeze(0).to(device)

    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        logits = model(torch_img)
    mask = np.argmax(logits.data.cpu().numpy(), axis=1)
    mask = np.array(mask[0], dtype=np.uint8)

    if np.max(mask) == 0:
        # Empty mask: nothing segmented, hand back the untouched image.
        return oriImage

    # Resize the 224x224 mask back to the original resolution.
    mask = cv2.resize(mask, (oriImgShape[1], oriImgShape[0]))
    mask[mask > 0] = 1
    rowSum = np.sum(mask, axis=0)
    columnSum = np.sum(mask, axis=1)

    # Outer product of the row/column occupancy vectors keeps the
    # axis-aligned region spanned by the mask.
    rowMask = generateMask(rowSum, oriImgShape[0], 0)
    columnMask = generateMask(columnSum, oriImgShape[1], 1)
    maskT = np.array(rowMask * columnMask, dtype=np.uint8)
    maskT = cv2.merge([maskT, maskT, maskT])

    result = oriImage * maskT
    res1 = removeBlankColumns(result)
    return removeBlankRows(res1)


def getFaceRegionCNN(imgfile):
    """Run dlib's CNN face detector on an image file and print each
    detection's rectangle and confidence (debug helper; returns None)."""
    detector = dlib.cnn_face_detection_model_v1(cnn_face_model)
    image = dlib.load_rgb_image(imgfile)
    detections = detector(image, 1)
    print("Number of faces detected: {}".format(len(detections)))
    for i, d in enumerate(detections):
        print(
            "Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}"
            .format(i, d.rect.left(), d.rect.top(), d.rect.right(),
                    d.rect.bottom(), d.confidence))
    rects = dlib.rectangles()
    rects.extend([d.rect for d in detections])
    print(rects)


def getFaceNumber(imgfile):
    """Return the number of faces dlib's frontal detector finds in the
    image file at *imgfile* (upsampling the image once)."""
    frontal_detector = dlib.get_frontal_face_detector()
    image = cv2.imread(imgfile)
    detections = frontal_detector(image, 1)
    return len(detections)


def getFaceRegion(imgfile):
    """Crop the largest detected face out of an image file.

    :param imgfile: path of the image to scan.
    :return: BGR ndarray containing the largest detected face.
    :raises FileNotFoundError: if the image cannot be read.
    """
    detector = dlib.get_frontal_face_detector()
    img = cv2.imread(imgfile)
    if img is None:
        raise FileNotFoundError("could not read image: {}".format(imgfile))
    dets = detector(img, 1)
    rects = dlib.rectangles()
    rects.extend([d for d in dets])
    faceRegion = getMaxFace(rects)
    # dlib rectangles can extend past the image border; a negative
    # left/top used directly in a slice would wrap around (numpy negative
    # indexing) and produce a wrong or empty crop, so clamp to 0.
    left = max(faceRegion.left(), 0)
    top = max(faceRegion.top(), 0)
    right = faceRegion.right()
    bottom = faceRegion.bottom()

    face = img[top:bottom, left:right]
    return face


def getMaxFace(rects: list):
    """Return the rectangle with the largest area from *rects*.

    :param rects: sequence of rectangle-like objects exposing
                  left()/top()/right()/bottom().
    :return: the rectangle with the greatest (bottom-top)*(right-left).
    :raises ValueError: if *rects* is empty (the original fell through to
                        np.argmax([]) and raised an opaque error).
    """
    if not rects:
        raise ValueError("no face rectangles provided")
    return max(
        rects,
        key=lambda r: (r.bottom() - r.top()) * (r.right() - r.left()),
    )


def adjustImage(img: np.ndarray):
    """Repeatedly re-encode and downscale *img* until the JPEG written to a
    temp file in the working directory is at most 50 KB.

    Each pass multiplies both the JPEG quality and the image dimensions by
    ``__factor__``. Quality and dimensions are clamped to at least 1 so the
    loop cannot hand cv2 a zero quality or a zero-sized resize target
    (which would raise) on tiny or incompressible inputs.

    :param img: BGR ndarray to compress.
    :return: name of the JPEG file left on disk (the original returned
             None, leaving callers no way to locate the temp file).
    """
    imgName = "tmp_{}".format(str(time.time()).replace(".", "")) + ".jpg"
    q = 100
    cv2.imwrite(imgName, img, [int(cv2.IMWRITE_JPEG_QUALITY), q])
    while os.path.getsize(imgName) / 1024 > 50:
        imgShape = img.shape
        q = max(int(q * __factor__), 1)
        print(q)
        height = max(int(imgShape[0] * __factor__), 1)
        width = max(int(imgShape[1] * __factor__), 1)
        print(height, width)
        img = cv2.resize(img, (width, height))
        cv2.imwrite(imgName, img, [int(cv2.IMWRITE_JPEG_QUALITY), q])
    return imgName


if __name__ == "__main__":
    # Demo entry point: segment the face from the sample image and save
    # the cropped result.
    segmented = getSegmentedFace("D:\\workspace\\faceCrop\\srf.png")
    cv2.imwrite('./result/mask_818_srf.jpg', segmented)
