liangtian committed on
Commit 1bad3bc
1 Parent(s): 0dcfd31

Upload create_gif.py

Files changed (1)
  1. create_gif.py +256 -0
create_gif.py ADDED
# USAGE
# python create_gif.py --config config.json --image images/vampire.jpg --output out.gif

# import the necessary packages
from imutils import face_utils
from imutils import paths
import numpy as np
import argparse
import imutils
import shutil
import json
import dlib
import cv2
import sys
import os


def overlay_image(bg, fg, fgMask, coords):
    # grab the foreground dimensions and the top-left placement coordinates
    (sH, sW) = fg.shape[:2]
    (x, y) = coords

    # place the foreground on a black canvas the same size as the background
    overlay = np.zeros(bg.shape, dtype="uint8")
    overlay[y:y + sH, x:x + sW] = fg

    # build a matching 3-channel alpha mask covering the foreground region
    alpha = np.zeros(bg.shape[:2], dtype="uint8")
    alpha[y:y + sH, x:x + sW] = fgMask
    alpha = np.dstack([alpha] * 3)

    # alpha-blend the overlay onto the background
    output = alpha_blend(overlay, bg, alpha)

    # return the composited image
    return output


def alpha_blend(fg, bg, alpha):
    # convert the images to floats and scale the alpha mask to the range [0, 1]
    fg = fg.astype("float")
    bg = bg.astype("float")
    alpha = alpha.astype("float") / 255

    # weighted blend: output = alpha * fg + (1 - alpha) * bg
    fg = cv2.multiply(alpha, fg)
    bg = cv2.multiply(1 - alpha, bg)
    output = cv2.add(fg, bg)

    # return the output image
    return output.astype("uint8")


def get_features(img_rd, *args):
    # input : img_rd -- path to an image file
    # output: the (x, y) coordinates of the landmark indices passed in *args,
    #         taken from dlib's 81-point facial landmark model
    detector1 = dlib.get_frontal_face_detector()
    predictor1 = dlib.shape_predictor(r"D:\PythonProject\birthday_crown\Creating-GIFs-with-OpenCV-master\assets\shape_predictor_81_face_landmarks.dat")

    # read the image file and convert it to grayscale
    img = cv2.imread(img_rd)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # compute the coordinates of all 81 landmarks
    pos_81 = []
    rects = detector1(img_gray, 0)
    landmarks = np.matrix([[p.x, p.y] for p in predictor1(img, rects[0]).parts()])

    for idx, point in enumerate(landmarks):
        # coordinates of each of the 81 points
        pos = (point[0, 0], point[0, 1])
        pos_81.append(pos)

    # pos_69to81 = []
    # # write points 69-81 (i.e. pos_81[68]-pos_81[80]) to a CSV file
    # for i in range(68, 81):
    #     pos_69to81.append(pos_81[i][0])
    #     pos_69to81.append(pos_81[i][1])
    # return pos_69to81

    # return only the two requested landmark points
    get_pos = []
    get_pos.append((pos_81[args[0]][0], pos_81[args[0]][1]))
    get_pos.append((pos_81[args[1]][0], pos_81[args[1]][1]))
    return get_pos


def create_gif(inputPath, outputPath, delay, finalDelay, loop):
    # grab all image paths in the input directory
    imagePaths = sorted(list(paths.list_images(inputPath)))

    # remove the last image path in the list
    lastPath = imagePaths[-1]
    imagePaths = imagePaths[:-1]

    # build and execute the ImageMagick 'convert' command (ImageMagick must be
    # installed), holding the final frame on screen for finalDelay
    cmd = "convert -delay {} {} -delay {} {} -loop {} {}".format(
        delay, " ".join(imagePaths), finalDelay, lastPath, loop,
        outputPath)
    os.system(cmd)


# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--config", required=True,
    help="path to configuration file")
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
ap.add_argument("-o", "--output", required=True,
    help="path to output GIF")
args = vars(ap.parse_args())

# load the JSON configuration file along with the sunglasses image and its
# associated mask
config = json.loads(open(args["config"]).read())
sg = cv2.imread(config["sunglasses"])
sgMask = cv2.imread(config["sunglasses_mask"])

# delete any existing temporary directory, then recreate it
shutil.rmtree(config["temp_dir"], ignore_errors=True)
os.makedirs(config["temp_dir"])

# load the OpenCV face detector and the dlib facial landmark predictor
print("[INFO] loading models...")
detector = cv2.dnn.readNetFromCaffe(config["face_detector_prototxt"],
    config["face_detector_weights"])
predictor = dlib.shape_predictor(config["landmark_predictor"])

# load the input image and build an input blob for the face detector
image = cv2.imread(args["image"])
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
    (300, 300), (104.0, 177.0, 123.0))

# pass the blob through the network and obtain the face detections
print("[INFO] computing object detections...")
detector.setInput(blob)
detections = detector.forward()

# keep only the single most confident detection
i = np.argmax(detections[0, 0, :, 2])
confidence = detections[0, 0, i, 2]

# bail out if no face was detected with sufficient confidence
if confidence < config["min_confidence"]:
    print("[INFO] no reliable faces found")
    sys.exit(0)

# compute the (x, y)-coordinates of the bounding box for the face
box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = box.astype("int")

# extract the facial landmarks inside the detected face region
rect = dlib.rectangle(int(startX), int(startY), int(endX), int(endY))
shape = predictor(image, rect)
shape = face_utils.shape_to_np(shape)

# grab two forehead points (indices 76 and 73 of the 81-point landmark model)
forehead = get_features(args["image"], 76, 73)
print("forehead points: " + str(forehead))

# print("LANDMARKS_IDXS" + str(face_utils.FACIAL_LANDMARKS_IDXS))
# (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
# (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# leftEyePts = shape[lStart:lEnd]
# rightEyePts = shape[rStart:rEnd]
#
# # compute the center of mass for each eye
# leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
# rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
#
# # compute the angle between the eye centroids
# dY = rightEyeCenter[1] - leftEyeCenter[1]
# dX = rightEyeCenter[0] - leftEyeCenter[0]
# angle = np.degrees(np.arctan2(dY, dX)) - 180

# compute the head-tilt angle from the two forehead points instead of the
# eye centroids above
left_point = forehead[0]
right_point = forehead[1]

# left_point = shape[75].astype(int)
# right_point = shape[79].astype(int)

dY = left_point[1] - right_point[1]
dX = left_point[0] - right_point[0]
print("left_point y: " + str(left_point[1]))
angle = np.degrees(np.arctan2(dY, dX)) - 180

# rotate the sunglasses image by our computed angle, ensuring the
# sunglasses will align with how the head is tilted
sg = imutils.rotate_bound(sg, angle)

# resize the sunglasses to roughly 120% of the face bounding-box width
# sgW = int((shape[16].astype(int)[0] - shape[0].astype(int)[0]) * 1.40)
sgW = int((endX - startX) * 1.2)
sg = imutils.resize(sg, width=sgW)
print("sgW: " + str(sgW))

# binarize the sunglasses mask, then rotate and resize it exactly like the
# sunglasses (nearest-neighbor interpolation keeps the mask binary)
sgMask = cv2.cvtColor(sgMask, cv2.COLOR_BGR2GRAY)
sgMask = cv2.threshold(sgMask, 0, 255, cv2.THRESH_BINARY)[1]
sgMask = imutils.rotate_bound(sgMask, angle)
sgMask = imutils.resize(sgMask, width=sgW, inter=cv2.INTER_NEAREST)

# build the list of y-coordinates the sunglasses will slide through, from the
# top of the frame down to the forehead point
steps = np.linspace(0, left_point[1], config["steps"],
    dtype="int")

# start looping over the steps
for (i, y) in enumerate(steps):
    # shift the sunglasses so they end up roughly centered on the forehead point
    shiftX = int(sg.shape[1] * 0.18)
    shiftY = int(sg.shape[0] * 0.80)
    y = max(0, y - shiftY)

    # add the sunglasses to the image
    output = overlay_image(image, sg, sgMask,
        (left_point[0] - shiftX, y))

    # on the final frame, overlay the "DEAL WITH IT" text as well
    if i == len(steps) - 1:
        # load the text image and its mask, then binarize the mask
        dwi = cv2.imread(config["deal_with_it"])
        dwiMask = cv2.imread(config["deal_with_it_mask"])
        dwiMask = cv2.cvtColor(dwiMask, cv2.COLOR_BGR2GRAY)
        dwiMask = cv2.threshold(dwiMask, 0, 255,
            cv2.THRESH_BINARY)[1]

        # resize both the text image and mask to be 80% the width of
        # the output image
        oW = int(W * 0.8)
        dwi = imutils.resize(dwi, width=oW)
        dwiMask = imutils.resize(dwiMask, width=oW,
            inter=cv2.INTER_NEAREST)

        # compute the coordinates of where the text will go on the
        # output image and then add the text to the image
        oX = int(W * 0.1)
        oY = int(H * 0.7)
        output = overlay_image(output, dwi, dwiMask, (oX, oY))

    # write the output image to our temporary directory
    p = os.path.sep.join([config["temp_dir"], "{}.jpg".format(
        str(i).zfill(8))])
    cv2.imwrite(p, output)

# assemble the frames in the temporary directory into the output GIF
print("[INFO] creating GIF...")
create_gif(config["temp_dir"], args["output"], config["delay"],
    config["final_delay"], config["loop"])

# cleanup by deleting our temporary directory
print("[INFO] cleaning up...")
shutil.rmtree(config["temp_dir"], ignore_errors=True)
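
For reference, the --config file must be a JSON document supplying every key the script reads above: face_detector_prototxt, face_detector_weights, landmark_predictor, sunglasses, sunglasses_mask, deal_with_it, deal_with_it_mask, min_confidence, temp_dir, steps, delay, final_delay, and loop. The sketch below writes a minimal config.json; the paths and numeric values are illustrative placeholders (they are not part of this commit) and must be pointed at your own model files and overlay assets.

import json

# illustrative configuration for create_gif.py -- every path below is a placeholder
example_config = {
    "face_detector_prototxt": "assets/deploy.prototxt",                    # placeholder path
    "face_detector_weights": "assets/res10_300x300_ssd.caffemodel",        # placeholder path
    "landmark_predictor": "assets/shape_predictor_68_face_landmarks.dat",  # placeholder path
    "sunglasses": "assets/sunglasses.png",                                 # placeholder path
    "sunglasses_mask": "assets/sunglasses_mask.png",                       # placeholder path
    "deal_with_it": "assets/deal_with_it.png",                             # placeholder path
    "deal_with_it_mask": "assets/deal_with_it_mask.png",                   # placeholder path
    "min_confidence": 0.5,    # minimum face-detection confidence (assumed value)
    "temp_dir": "temp",       # working directory for the intermediate frames
    "steps": 20,              # number of GIF frames to render (assumed value)
    "delay": 5,               # per-frame delay passed to ImageMagick (assumed value)
    "final_delay": 250,       # delay for the last frame (assumed value)
    "loop": 0                 # 0 tells ImageMagick to loop the GIF forever
}

with open("config.json", "w") as f:
    json.dump(example_config, f, indent=4)

With such a file in place, the script is run exactly as in the USAGE comment (python create_gif.py --config config.json --image images/vampire.jpg --output out.gif); ImageMagick's convert command must be available on the PATH for the final GIF assembly step.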