# USAGE
# python create_gif.py --config config.json --image images/me.jpg --output me.gif

# import the necessary packages
from imutils import face_utils
from imutils import paths
import numpy as np
import argparse
import imutils
import shutil
import json
import dlib
import cv2
import sys
import os


def overlay_image(bg, fg, fgMask, coords):
    # grab the foreground spatial dimensions, then unpack the
    # coordinates tuple (i.e., where the foreground will be placed)
    (sH, sW) = fg.shape[:2]
    (x, y) = coords

    # place the foreground on a black canvas the same size as the
    # background, positioned at the supplied coordinates
    overlay = np.zeros(bg.shape, dtype="uint8")
    overlay[y:y + sH, x:x + sW] = fg

    # build the corresponding alpha mask, then stack it to three
    # channels so it can be blended with the color images
    alpha = np.zeros(bg.shape[:2], dtype="uint8")
    alpha[y:y + sH, x:x + sW] = fgMask
    alpha = np.dstack([alpha] * 3)

    # alpha blend the foreground onto the background and return the result
    output = alpha_blend(overlay, bg, alpha)
    return output


def alpha_blend(fg, bg, alpha):
    # convert the foreground, background, and alpha layers to floats,
    # scaling the alpha layer to the range [0, 1]
    fg = fg.astype("float")
    bg = bg.astype("float")
    alpha = alpha.astype("float") / 255

    # perform alpha blending
    fg = cv2.multiply(alpha, fg)
    bg = cv2.multiply(1 - alpha, bg)

    # add the weighted foreground and background, then return the
    # output image
    output = cv2.add(fg, bg)
    return output.astype("uint8")


def get_features(img_rd, *args):
    # input: img_rd, the path to an image file; args, the indices of
    #   the landmarks to extract from the 81-point model
    # output: a list with the (x, y)-coordinates of the two requested
    #   landmarks
    detector1 = dlib.get_frontal_face_detector()
    predictor1 = dlib.shape_predictor("shape_predictor_81_face_landmarks.dat")

    # read the image file and convert it to grayscale
    img = cv2.imread(img_rd)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detect the face and compute the coordinates of all 81 landmarks
    pos_81 = []
    rects = detector1(img_gray, 0)
    landmarks = np.matrix([[p.x, p.y]
        for p in predictor1(img, rects[0]).parts()])

    for idx, point in enumerate(landmarks):
        # coordinates of the current landmark
        pos = (point[0, 0], point[0, 1])
        pos_81.append(pos)

    # return only the requested landmark coordinates
    get_pos = []
    get_pos.append((pos_81[args[0]][0], pos_81[args[0]][1]))
    get_pos.append((pos_81[args[1]][0], pos_81[args[1]][1]))
    return get_pos


def create_gif(inputPath, outputPath, delay, finalDelay, loop):
    # grab all image paths in the input directory
    imagePaths = sorted(list(paths.list_images(inputPath)))

    # remove the last image path in the list so it can be given a
    # longer (final) delay
    lastPath = imagePaths[-1]
    imagePaths = imagePaths[:-1]

    # construct the ImageMagick 'convert' command that builds the
    # output GIF, then execute it
    cmd = "convert -delay {} {} -delay {} {} -loop {} {}".format(
        delay, " ".join(imagePaths), finalDelay, lastPath, loop,
        outputPath)
    os.system(cmd)


# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--config", required=True,
    help="path to configuration file")
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
ap.add_argument("-o", "--output", required=True,
    help="path to output GIF")
args = vars(ap.parse_args())

# load the JSON configuration file, then the crown image and its
# associated mask
config = json.loads(open(args["config"]).read())
sg = cv2.imread(config["Crown"])
sgMask = cv2.imread(config["Crown_mask"])

# delete any existing temporary directory and re-create it
shutil.rmtree(config["temp_dir"], ignore_errors=True)
os.makedirs(config["temp_dir"])

# load our OpenCV face detector and dlib facial landmark predictor
print("[INFO] loading models...")
detector = cv2.dnn.readNetFromCaffe(config["face_detector_prototxt"],
    config["face_detector_weights"])
predictor = dlib.shape_predictor(config["landmark_predictor"])

# load the input image and construct an input blob from it
image = cv2.imread(args["image"])
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
    (300, 300), (104.0, 177.0, 123.0))

# pass the blob through the network and obtain the detections
print("[INFO] computing object detections...")
detector.setInput(blob)
detections = detector.forward()

# we assume there is only one face in the image, so grab the
# detection with the highest confidence
i = np.argmax(detections[0, 0, :, 2])
confidence = detections[0, 0, i, 2]

# filter out weak detections
if confidence < config["min_confidence"]:
    print("[INFO] no reliable faces found")
    sys.exit(0)

# compute the (x, y)-coordinates of the bounding box for the face
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")

# predict the facial landmarks inside the detected face region
rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
shape = predictor(image, rect)
shape = face_utils.shape_to_np(shape)

# grab two forehead points (indices 76 and 73 of the 81-point model)
# that the crown will be anchored to
forehead = get_features(args["image"], 76, 73)
left_point = forehead[0]
right_point = forehead[1]
# compute the angle of the line between the two forehead points
dY = left_point[1] - right_point[1]
dX = left_point[0] - right_point[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180

# rotate the crown image by our computed angle, ensuring the crown
# will align with how the head is tilted
sg = imutils.rotate_bound(sg, angle)

# resize the crown to be slightly wider than the face bounding box
sgW = int((endX - startX) * 1.2)
sg = imutils.resize(sg, width=sgW)

# binarize the crown mask, then rotate and resize it exactly as we
# did the crown itself (nearest-neighbor interpolation keeps the mask
# strictly binary)
sgMask = cv2.cvtColor(sgMask, cv2.COLOR_BGR2GRAY)
sgMask = cv2.threshold(sgMask, 0, 255, cv2.THRESH_BINARY)[1]
sgMask = imutils.rotate_bound(sgMask, angle)
sgMask = imutils.resize(sgMask, width=sgW, inter=cv2.INTER_NEAREST)

# the crown drops from the top of the frame down to the forehead, so
# compute N evenly spaced y-coordinates along that path
steps = np.linspace(0, left_point[1], config["steps"], dtype="int")

# start looping over the steps
for (i, y) in enumerate(steps):
    # shift the crown so it sits centered over the forehead points
    shiftX = int(sg.shape[1] * 0.18)
    shiftY = int(sg.shape[0] * 0.80)
    y = max(0, y - shiftY)

    # add the crown to the image
    output = overlay_image(image, sg, sgMask, (left_point[0] - shiftX, y))

    # if this is the final step, overlay the birthday text as well
    if i == len(steps) - 1:
        # load both the birthday text image and mask, then binarize
        # the mask
        dwi = cv2.imread(config["birthday"])
        dwiMask = cv2.imread(config["birthday_mask"])
        dwiMask = cv2.cvtColor(dwiMask, cv2.COLOR_BGR2GRAY)
        dwiMask = cv2.threshold(dwiMask, 0, 255, cv2.THRESH_BINARY)[1]

        # resize both the text image and mask to be 80% the width of
        # the output image
        oW = int(W * 0.8)
        dwi = imutils.resize(dwi, width=oW)
        dwiMask = imutils.resize(dwiMask, width=oW,
            inter=cv2.INTER_NEAREST)

        # compute the coordinates of where the text will go on the
        # output image and then add the text to the image
        oX = int(W * 0.1)
        oY = int(H * 0.7)
        output = overlay_image(output, dwi, dwiMask, (oX, oY))

    # write the output image to our temporary directory
    p = os.path.sep.join([config["temp_dir"], "{}.jpg".format(
        str(i).zfill(8))])
    cv2.imwrite(p, output)

# create the GIF from our set of frames
print("[INFO] creating GIF...")
create_gif(config["temp_dir"], args["output"], config["delay"],
    config["final_delay"], config["loop"])

# cleanup by deleting our temporary directory
print("[INFO] cleaning up...")
shutil.rmtree(config["temp_dir"], ignore_errors=True)
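
# For reference, a minimal example config.json is sketched below. The key
# names match the config[...] lookups used in this script; every path and
# value shown is an illustrative assumption rather than a file shipped with
# this script, so point them at your own crown/text overlays, Caffe face
# detector, and dlib landmark model before running.
#
# {
#     "Crown": "images/crown.png",
#     "Crown_mask": "images/crown_mask.png",
#     "birthday": "images/birthday.png",
#     "birthday_mask": "images/birthday_mask.png",
#     "face_detector_prototxt": "face_detector/deploy.prototxt",
#     "face_detector_weights": "face_detector/res10_300x300_ssd.caffemodel",
#     "landmark_predictor": "shape_predictor_68_face_landmarks.dat",
#     "min_confidence": 0.5,
#     "steps": 20,
#     "delay": 5,
#     "final_delay": 250,
#     "loop": 0,
#     "temp_dir": "temp"
# }
#
# Note that create_gif() shells out to ImageMagick's `convert`, so
# ImageMagick must be installed and on the PATH for the final GIF step.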