#!/usr/bin/env python
# coding: utf-8

# In[1]:


#get_ipython().run_line_magic('reload_ext', 'autoreload')
#get_ipython().run_line_magic('autoreload', '2')
#get_ipython().run_line_magic('matplotlib', 'inline')


# In[2]:


#get_ipython().system('pip install -q paddlehub==1.6.1')


# In[3]:


import paddlehub as hub
from random import randrange
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt 
import matplotlib.image as mpimg

def get_random_color():
    """Pick a random BGR drawing color as a 3-tuple of ints."""
    blue = randrange(0, 255, 1)
    green = randrange(10, 255, 1)
    red = randrange(10, 255, 1)
    return blue, green, red


# Feature names matching the 68-point landmark grouping produced by
# face_landmarks(), each paired with its own random drawing color.
LABELS = [
    'chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
    'nose_tip', 'left_eye', 'right_eye', 'top_lip', 'bottom_lip',
]
COLORS = [get_random_color() for _ in LABELS]


# In[4]:


def get_landmarks(img):
    """Run 68-point face landmark detection on a BGR image.

    Args:
        img: image array as loaded by ``cv2.imread`` (BGR).

    Returns:
        The landmark point list for the first detected face
        (``result[0]['data'][0]`` from the hub module).
    """
    # Loading the hub model is expensive; cache the module on the
    # function so repeated calls reuse one instance instead of
    # re-creating it every time.
    module = getattr(get_landmarks, '_module', None)
    if module is None:
        module = hub.Module(name="face_landmark_localization")
        get_landmarks._module = module
    result = module.keypoint_detection(images=[img])
    return result[0]['data'][0]


# In[5]:


def get_face_rectangle(img):
    """Detect the first face in ``img`` and return its bounding box.

    Args:
        img: image array as loaded by ``cv2.imread`` (BGR).

    Returns:
        (x, y, w, h): top-left corner plus width and height, as ints.
    """
    # Cache the detector module: constructing it loads a model and is
    # far too expensive to repeat per call.
    detector = getattr(get_face_rectangle, '_detector', None)
    if detector is None:
        detector = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_320")
        get_face_rectangle._detector = detector
    result = detector.face_detection(images=[img])
    face = result[0]['data'][0]  # first detected face only
    x1, y1 = int(face['left']), int(face['top'])
    x2, y2 = int(face['right']), int(face['bottom'])
    return x1, y1, x2 - x1, y2 - y1


# In[6]:


def face_landmarks(face_image, location_of_face=None):
    """Group the 68 detected landmarks into named facial features.

    Mirrors the dict layout of ``face_recognition.face_landmarks``.
    ``location_of_face`` is accepted for API compatibility and unused.

    Returns:
        A one-element list of dicts mapping feature name to a list of
        (x, y) integer tuples.
    """
    raw = get_landmarks(face_image)
    faces = [[(int(p[0]), int(p[1])) for p in raw]]

    features = []
    for points in faces:
        features.append({
            "chin": points[0:17],
            "left_eyebrow": points[17:22],
            "right_eyebrow": points[22:27],
            "nose_bridge": points[27:31],
            "nose_tip": points[31:36],
            "left_eye": points[36:42],
            "right_eye": points[42:48],
            # Lip outlines close the loop by walking back along the
            # inner lip points in reverse order.
            "top_lip": points[48:55] + [points[i] for i in (64, 63, 62, 61, 60)],
            "bottom_lip": points[54:60] + [points[48], points[60]]
                          + [points[i] for i in (67, 66, 65, 64)],
        })
    return features


# In[7]:


def calculate_angle(point1, point2):
    """Return the angle, in degrees, of the line from point1 to point2.

    Args:
        point1, point2: (x, y) pairs.

    Returns:
        Angle in (-90.0, 90.0]; a vertical segment returns +/-90.0
        instead of raising ZeroDivisionError (the original divided by
        x2 - x1 unguarded).
    """
    x1, y1 = point1[0], point1[1]
    x2, y2 = point2[0], point2[1]
    dx = x2 - x1
    dy = y2 - y1
    if dx == 0:
        # atan(dy/dx) diverges here; return the limiting angle.
        return 90.0 if dy >= 0 else -90.0
    return 180 / math.pi * math.atan(float(dy) / dx)


# In[8]:


def rotate_bound(image, angle):
    """Rotate ``image`` by ``angle`` degrees, enlarging the output
    canvas so no corner of the original image is clipped.

    Returns:
        (rotated, M): the rotated image and the 2x3 affine matrix
        that was applied.
    """
    height, width = image.shape[:2]
    center = (width / 2, height / 2)

    # OpenCV treats positive angles as counter-clockwise; the negation
    # makes positive ``angle`` rotate the opposite way.
    M = cv2.getRotationMatrix2D(center, -angle, 1.0)
    abs_cos = np.abs(M[0, 0])
    abs_sin = np.abs(M[0, 1])

    # Size of the axis-aligned bounding box of the rotated image.
    new_w = int(height * abs_sin + width * abs_cos)
    new_h = int(height * abs_cos + width * abs_sin)

    # Translate so the rotation center lands in the middle of the
    # enlarged canvas.
    M[0, 2] += new_w / 2 - center[0]
    M[1, 2] += new_h / 2 - center[1]

    return cv2.warpAffine(image, M, (new_w, new_h)), M


# In[9]:



def overlay_transparent(background_img, img_to_overlay_t, x, y, overlay_size=None):
    """Composite a BGRA overlay onto a background, centered at (x, y).

    Args:
        background_img: BGR or BGRA image; not modified in place.
        img_to_overlay_t: 4-channel (BGRA) overlay image.
        x, y: center of the overlay in background coordinates.
        overlay_size: optional (w, h) to resize the overlay to first.

    Returns:
        A new 3-channel BGR image with the overlay blended in.
    """
    canvas = background_img.copy()
    if canvas.shape[2] == 3:
        # Work in BGRA so channel counts match for the blend below.
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2BGRA)

    overlay = img_to_overlay_t
    if overlay_size is not None:
        overlay = cv2.resize(overlay.copy(), overlay_size)

    _b, _g, _r, alpha = cv2.split(overlay)
    # Median-blur the alpha channel to soften jagged mask edges.
    mask = cv2.medianBlur(alpha, 5)

    h, w = overlay.shape[:2]
    top, bottom = int(y - h / 2), int(y + h / 2)
    left, right = int(x - w / 2), int(x + w / 2)
    roi = canvas[top:bottom, left:right]

    # Keep the background where the mask is transparent and the
    # overlay where it is opaque, then merge the two halves.
    background_part = cv2.bitwise_and(roi.copy(), roi.copy(), mask=cv2.bitwise_not(mask))
    overlay_part = cv2.bitwise_and(overlay, overlay, mask=mask)
    canvas[top:bottom, left:right] = cv2.add(background_part, overlay_part)

    # Back to 3 channels for callers that display/save BGR.
    return cv2.cvtColor(canvas, cv2.COLOR_BGRA2BGR)



# In[10]:


from math import degrees, atan2

def angle_between(p1, p2):
    """Angle in degrees of the vector p1 -> p2, measured from the
    positive x axis; result lies in (-180, 180]."""
    return degrees(atan2(p2[1] - p1[1], p2[0] - p1[0]))


# In[ ]:





# In[11]:


def get_eye_center_point(landmarks, idx1, idx2):
    """Midpoint (integer division) of two landmark points."""
    p, q = landmarks[idx1], landmarks[idx2]
    return ((p[0] + q[0]) // 2, (p[1] + q[1]) // 2)


# In[ ]:





# In[ ]:





# In[12]:


src_img = cv2.imread('test.jpg')

module = hub.Module(name="face_landmark_localization")
result = module.keypoint_detection(images=[src_img])

# Draw every landmark except the eyebrows (indices 17-26) and the
# eyes (36-47), matching the slice boundaries used in face_landmarks().
# The original condition used `index > 17`, which wrongly drew point
# 17 — the first left-eyebrow point; fixed to include it in the skip.
tmp_img = src_img.copy()
for index, point in enumerate(result[0]['data'][0]):
    if (17 <= index < 27) or (36 <= index < 48):
        continue
    cv2.circle(tmp_img, (int(point[0]), int(point[1])), 2, (0, 0, 255), -1)

res_img_path = 'face_landmark.jpg'
cv2.imwrite(res_img_path, tmp_img)

# Display the annotated 68-keypoint result.
img = mpimg.imread(res_img_path)
plt.figure(figsize=(10, 10))
plt.imshow(img)
plt.axis('off')
plt.show()


# In[13]:


# Notebook sanity check: count of landmark points returned for the
# first face (the slices in face_landmarks() assume 68 points).
len(result[0]['data'][0])


# In[14]:


def get_center_point(landmarks, idx1, idx2):
    """Midpoint of landmarks[idx1] and landmarks[idx2] (integer coords).

    NOTE(review): exact duplicate of get_eye_center_point — consider
    unifying the two helpers.
    """
    x = (landmarks[idx1][0] + landmarks[idx2][0]) // 2
    y = (landmarks[idx1][1] + landmarks[idx2][1]) // 2
    return (x, y)


# In[15]:


# Midpoint of landmark 27 and landmark 36 — per the slices in
# face_landmarks() these are the first nose-bridge and left-eye points.
# NOTE(review): the name `chin` looks misleading for those indices;
# confirm the intended anchor points.
chin=get_center_point(result[0]['data'][0],27,36)
chin


# In[16]:


# Inspect individual landmark points 9 and 10 — both fall inside the
# 0-17 "chin" slice used by face_landmarks().
result[0]['data'][0][9]


# In[17]:


result[0]['data'][0][10]


# In[18]:


#src_img = cv2.imread('mask/1.jpg')
#module = hub.Module(name="pyramidbox_lite_mobile_mask")
#input_dict = {"data": [src_img]}
#results = module.face_detection(data=input_dict)


# In[19]:


#results


# In[20]:


def wear_masks(image, masks, chin_l, chin_r):
    """Composite a mask image over a face.

    Args:
        image: BGR face image.
        masks: BGRA mask image to overlay.
        chin_l, chin_r: two anchor landmark points; the mask is
            centered on their midpoint, scaled to twice their
            distance, and rotated to match their angle.

    Returns:
        The image with the mask composited in, or the unchanged image
        if the overlay step fails.
    """
    chin_l = np.array(chin_l)
    chin_r = np.array(chin_r)
    masks_center = np.mean([chin_l, chin_r], axis=0)   # overlay center
    masks_size = np.linalg.norm(chin_l - chin_r) * 2   # overlay width
    angle = -angle_between(chin_l, chin_r)

    # Reuse rotate_bound() instead of duplicating its rotation math
    # inline as the original did. rotate_bound negates its angle
    # argument internally, so pass -angle to reproduce the original
    # cv2.getRotationMatrix2D(masks_c, angle, 1) behavior.
    rotated_masks, _ = rotate_bound(masks, -angle)

    try:
        # Preserve the rotated mask's aspect ratio at the target width.
        scaled_h = int(rotated_masks.shape[0] * masks_size / rotated_masks.shape[1])
        image = overlay_transparent(
            image, rotated_masks, masks_center[0], masks_center[1],
            overlay_size=(int(masks_size), scaled_h),
        )
    except Exception as e:
        # Best-effort: keep the unmasked image when compositing fails
        # (e.g. the mask region extends past the image border).
        print('failed overlay image')
        print(e)
    return image


# In[23]:


import os
import matplotlib.animation as animation
from IPython.display import HTML

# Build an animation: overlay each mask image found in ./mask onto
# test.jpg and collect one frame per mask.
masks_lists = []
fig = plt.figure()
module = hub.Module(name="face_landmark_localization")
# Windows-specific ffmpeg path used to encode the HTML5 video.
plt.rcParams['animation.ffmpeg_path'] = 'E:\\software\\python\\ffmpeg\\bin\\ffmpeg.exe'
for path in os.listdir('mask'):
    image_file = 'test.jpg'
    masks_file = 'mask/' + path

    # Re-read the base image every iteration so previously drawn
    # masks do not accumulate across frames.
    image = cv2.imread(image_file)
    print(image.shape)
    masks = cv2.imread(masks_file, cv2.IMREAD_UNCHANGED)
    if masks.shape[2]==3:
        # Ensure an alpha channel so overlay_transparent can split BGRA.
        masks = cv2.cvtColor(masks, cv2.COLOR_BGR2BGRA)  
    result = module.keypoint_detection(images=[image])
    landmarks = result[0]['data'][0]
    # nose=get_center_point(landmarks,27,36)
    # NOTE(review): per the slices in face_landmarks(), index 30 is a
    # nose-tip point and 67 a bottom-lip point — the chin_l/chin_r
    # names look misleading; confirm the intended anchors.
    chin_l=landmarks[30]
    chin_r=landmarks[67]
    image = wear_masks(image, masks, chin_l, chin_r)
    # cv2 loads BGR; reorder channels for matplotlib display.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    im = plt.imshow(image, animated=True)
    plt.axis('off') 
    masks_lists.append([im])

# One frame per mask, 1s apart, looped with a 1s pause at the end.
ani = animation.ArtistAnimation(fig, masks_lists, interval=1000, blit=True, repeat_delay=1000)
HTML(ani.to_html5_video())


# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# #### Distribution of the data lengths?

# In[ ]:





# In[ ]:





# In[ ]:




