# Copyright (c) Alibaba, Inc. and its affiliates.

import json
import math
import os
import shutil

import cv2
import numpy as np
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from PIL import Image
from tqdm import tqdm


def crop_and_resize_v1(im, bbox):
    """Crop a square region centered on a face bbox and resize it to 512x512.

    The square's side is chosen so the longer bbox edge spans ~0.35/1.15 of
    it, and the face center sits slightly above the crop's middle
    (y at 0.5/1.15 of the side). Parts of the square falling outside the
    image are clipped; `pad_to_square_v1` restores squareness afterwards.

    Note: the original body allocated a white 255-canvas and computed paste
    offsets (x1/x2/y1/y2) that were never used — dead code removed here;
    the live crop path is unchanged.

    Args:
        im: HxWx3 image array (OpenCV channel order assumed — confirm caller).
        bbox: (x1, y1, x2, y2) face box in pixel coordinates of `im`.

    Returns:
        512x512x3 image containing the padded square crop.
    """
    h, w, _ = im.shape
    face_frac = 0.35 / 1.15  # fraction of the crop side occupied by the face
    face_size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
    cx = (bbox[2] + bbox[0]) / 2
    cy = (bbox[3] + bbox[1]) / 2
    side = int(face_size / face_frac)
    # Face center at x = 0.5 and y = 0.5/1.15 of the crop side.
    xmin = int(cx - 0.5 * side)
    ymin = int(cy - (0.5 / 1.15) * side)
    xmax = xmin + side
    ymax = ymin + side
    # Clip the square to the image bounds.
    xmin, ymin = max(xmin, 0), max(ymin, 0)
    xmax, ymax = min(xmax, w), min(ymax, h)
    imc = im[ymin:ymax, xmin:xmax, :]
    imc = pad_to_square_v1(imc)
    return cv2.resize(imc, (512, 512))

def pad_to_square_v1(im):
    """Pad an HxWxC image to a centered max(H, W) square with zero borders.

    Bug note: the original passed ``0`` positionally to cv2.copyMakeBorder,
    where the 7th positional argument is ``dst`` (not ``value``). np.pad is
    used instead so the black (zero) fill is explicit and unambiguous.

    Args:
        im: HxWxC image array (dtype preserved).

    Returns:
        SxSxC array with S = max(H, W); the input is centered and the
        borders are zero-filled.
    """
    h, w, _ = im.shape
    side = max(h, w)
    top = (side - h) // 2
    left = (side - w) // 2
    return np.pad(im,
                  ((top, side - h - top), (left, side - w - left), (0, 0)),
                  mode='constant', constant_values=0)



def transformation_from_points(points1, points2):
    """Estimate the 2D similarity transform mapping points1 onto points2.

    Uses the Procrustes/SVD approach: center both point sets, normalize by
    their scalar standard deviations, recover the rotation from the SVD of
    the correlation matrix, then assemble a 3x3 homogeneous matrix.

    Args:
        points1: Nx2 np.matrix of source points.
        points2: Nx2 np.matrix of destination points.

    Returns:
        3x3 homogeneous matrix [[sR, t], [0, 0, 1]] so that
        points2 ~ sR * points1 + t.
    """
    src = points1.astype(np.float64)
    dst = points2.astype(np.float64)
    src_mean = np.mean(src, axis=0)
    dst_mean = np.mean(dst, axis=0)
    src = src - src_mean
    dst = dst - dst_mean
    # Clamp the source spread to avoid dividing by ~zero for degenerate input.
    src_std = max(np.std(src), 1.0e-4)
    dst_std = np.std(dst)
    src = src / src_std
    dst = dst / dst_std
    # SVD of the correlation matrix yields the optimal rotation.
    U, S, Vt = np.linalg.svd(src.T * dst)
    R = (U * Vt).T
    scale = dst_std / src_std
    translation = dst_mean.T - scale * R * src_mean.T
    return np.vstack([np.hstack((scale * R, translation)),
                      np.matrix([0., 0., 1.])])


def rotate(im, keypoints):
    """Rotate `im` so its 5 facial landmarks align with an upright mean face.

    Args:
        im: HxWx3 image array.
        keypoints: flat sequence [x0, y0, ..., x4, y4] of 5 landmarks
            (presumably eyes, nose tip, mouth corners — confirm with the
            detector's output order) in pixel coordinates of `im`.

    Returns:
        (ns, ns, 3) image with ns = int(1.5 * max(H, W)): the input centered
        on an enlarged zero-padded canvas and rotated about the canvas center.
    """
    h, w, _ = im.shape
    # Canonical 5-point mean face in a 160x160 reference frame.
    dst_mean_face_size = 160
    dst_mean_face = np.asarray([0.31074522411511746, 0.2798131190011913,
                                0.6892073313037804, 0.2797830232679366,
                                0.49997367716346774, 0.5099309118810921,
                                0.35811903020866753, 0.7233174007629063,
                                0.6418878095835022, 0.7232890570786875])
    dst_mean_face = np.reshape(dst_mean_face, (5, 2)) * dst_mean_face_size

    points_array = np.zeros((5, 2))
    for k in range(5):
        points_array[k, 0] = keypoints[2 * k]
        points_array[k, 1] = keypoints[2 * k + 1]

    pts1 = np.float64(np.matrix([[p[0], p[1]] for p in points_array]))
    pts2 = np.float64(np.matrix([[p[0], p[1]] for p in dst_mean_face]))
    trans_mat = transformation_from_points(pts1, pts2)
    # trans_mat[:2, :2] is scale*rotation, so atan2(s*sin, s*cos) recovers the
    # in-plane angle for all quadrants. Bug fix: the original fallback branch
    # divided by trans_mat[0, 2] (the translation term), which is meaningless.
    angle = math.atan2(trans_mat[1, 0], trans_mat[1, 1])

    # Bug fix: the original called the undefined name `pad_to_square`.
    # Pad to the same ns x ns canvas used as the rotation center/output size
    # below, centering the image (assumed intent of the missing helper).
    ns = int(1.5 * max(h, w))
    top = (ns - h) // 2
    left = (ns - w) // 2
    im = cv2.copyMakeBorder(im, top, ns - h - top, left, ns - w - left,
                            cv2.BORDER_CONSTANT, value=0)
    M = cv2.getRotationMatrix2D((ns / 2, ns / 2), angle=-angle / np.pi * 180, scale=1.0)
    im = cv2.warpAffine(im, M=M, dsize=(ns, ns))
    return im


def get_mask_head(result):
    """Build a soft head mask (hair + face, limited to the human silhouette).

    Args:
        result: human-parsing output dict with keys 'masks' (512x512 arrays —
            assumed to match the 512x512 working resolution), 'scores', and
            'labels' containing 'Face' / 'Human' / 'Hair' entries.

    Returns:
        512x512x1 float mask with values in [0, 1].
    """
    masks = result['masks']
    scores = result['scores']
    labels = result['labels']
    mask_hair = np.zeros((512, 512))
    mask_face = np.zeros((512, 512))
    mask_human = np.zeros((512, 512))
    # Keep the largest confident (score > 0.8) mask per category.
    for i in range(len(labels)):
        if scores[i] <= 0.8:
            continue
        if labels[i] == 'Face':
            if np.sum(masks[i]) > np.sum(mask_face):
                mask_face = masks[i]
        elif labels[i] == 'Human':
            if np.sum(masks[i]) > np.sum(mask_human):
                mask_human = masks[i]
        elif labels[i] == 'Hair':
            if np.sum(masks[i]) > np.sum(mask_hair):
                mask_hair = masks[i]
    mask_head = np.clip(mask_hair + mask_face, 0, 1)
    # Dilate proportionally to face size to close hair/face seams, then keep
    # only what lies inside the human silhouette.
    ksize = max(int(np.sqrt(np.sum(mask_face)) / 20), 1)
    kernel = np.ones((ksize, ksize))
    mask_head = cv2.dilate(mask_head, kernel, iterations=1) * mask_human
    _, mask_head = cv2.threshold((mask_head * 255).astype(np.uint8), 127, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(mask_head, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if contours:
        # Keep only the largest connected component.
        areas = [cv2.contourArea(c) for c in contours]
        max_idx = int(np.argmax(areas))
        mask_head = np.zeros((512, 512), dtype=np.uint8)
        cv2.fillPoly(mask_head, [contours[max_idx]], 255)
        mask_head = mask_head.astype(np.float32) / 255
    else:
        # Bug fix: np.argmax([]) raised ValueError when no contour survived
        # thresholding; fall back to the face mask alone.
        mask_head = np.zeros((512, 512), dtype=np.float32)
    mask_head = np.clip(mask_head + mask_face, 0, 1)
    return np.expand_dims(mask_head, 2)


class Blipv2():
    """Single-face image preprocessing pipeline.

    Detects the dominant face, crops a square around it, applies skin
    retouching, and composites the head (hair + face) onto a white
    background. ``__call__`` returns the 1024x1024 resized input on success
    and ``None`` when any quality gate fails (no face, ambiguous multiple
    faces, retouching failure, or low landmark confidence).
    """

    def __init__(self):
        # Model revisions are pinned for reproducibility.
        self.skin_retouching = pipeline('skin-retouching-torch', model='damo/cv_unet_skin_retouching_torch', model_revision='v1.0.1')
        self.face_detection = pipeline(task=Tasks.face_detection, model='damo/cv_ddsar_face-detection_iclr23-damofd', model_revision='v1.1')
        self.segmentation_pipeline = pipeline(Tasks.image_segmentation,
                                              'damo/cv_resnet101_image-multiple-human-parsing', model_revision='v1.0.1')
        self.fair_face_attribute_func = pipeline(Tasks.face_attribute_recognition,
                                                 'damo/cv_resnet34_face-attribute-recognition_fairface', model_revision='v2.0.2')
        self.facial_landmark_confidence_func = pipeline(Tasks.face_2d_keypoints,
                                                        'damo/cv_manual_facial-landmark-confidence_flcm', model_revision='v2.5')

    def _select_face(self, bboxes):
        """Return the single dominant face bbox, or None if absent/ambiguous."""
        if len(bboxes) == 0:
            print('Detecting no face after rotation, do not use this image')
            return None
        if len(bboxes) == 1:
            return bboxes[0]
        areas = np.array([(b[2] - b[0]) * (b[3] - b[1]) for b in bboxes])
        order = np.argsort(areas)[::-1]
        # Require the largest face to clearly dominate (4x area) the runner-up.
        if areas[order[0]] < 4 * areas[order[1]]:
            print('Detecting multiple faces after rotation, do not use image')
            return None
        return bboxes[order[0]]

    def __call__(self, im):
        """Process one image array (HxWx3, OpenCV order assumed).

        Returns:
            The 1024x1024 resized copy of the input on success, else None.
            Side effects: writes ./tmp_face.png, face_image.png, and c.png.
        """
        tmp_path = './tmp_face.png'
        h, w, _ = im.shape
        # Detection runs on a copy whose longer side is 1024 pixels.
        ratio = 1024 / max(w, h)
        imt = cv2.resize(im, (round(w * ratio), round(h * ratio)))
        cv2.imwrite(tmp_path, imt)

        # NOTE(review): ns uses only the height, so the bbox rescaling below
        # is exact only for (near-)square inputs — confirm upstream crops.
        ns = im.shape[0]
        imt = cv2.resize(im, (1024, 1024))
        cv2.imwrite('face_image.png', imt)

        result_det = self.face_detection(tmp_path)
        bbox = self._select_face(result_det['boxes'])
        if bbox is None:
            # Bug fix: the original only printed and fell through, crashing on
            # an unbound bbox (no face) or reusing a stale one (multi-face).
            return None

        # Map the bbox from detector coordinates back to the original image.
        for idx in range(4):
            bbox[idx] = bbox[idx] * ns / 1024
        cv2.imwrite(tmp_path, crop_and_resize_v1(im, bbox))

        result = self.skin_retouching(tmp_path)
        if result is None or result[OutputKeys.OUTPUT_IMG] is None:
            print('Cannot do skin retouching, do not use this image.')
            # Bug fix: the original indexed the None result after printing.
            return None
        cv2.imwrite(tmp_path, result[OutputKeys.OUTPUT_IMG])

        # Composite the segmented head onto a white background.
        result = self.segmentation_pipeline(tmp_path)
        mask_head = get_mask_head(result)
        im = cv2.imread(tmp_path)
        im = im * mask_head + 255 * (1 - mask_head)
        cv2.imwrite('c.png', im)

        raw_result = self.facial_landmark_confidence_func(im)
        if raw_result is None:
            print('landmark quality fail...')
            return None
        if float(raw_result['scores'][0]) < (1 - 0.145):
            print('landmark quality fail...')
            return None

        return imt

