KenjieDec committed on
Commit
172e426
1 Parent(s): 852b76a

Face Inpainting and Selfie

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ inpainting.png filter=lfs diff=lfs merge=lfs -text
GPEN.ipynb DELETED
@@ -1,343 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "7TLDy5mi1rid"
- },
- "source": [
- "# Run once only!"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "nG64GBsoyoMH"
- },
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "--fxPES99Cp0",
- "outputId": "aa291a7e-68b8-4115-f177-fad3ce43a8fd"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Cloning into 'GPEN'...\n",
- "remote: Enumerating objects: 142, done.\u001b[K\n",
- "remote: Counting objects: 100% (142/142), done.\u001b[K\n",
- "remote: Compressing objects: 100% (123/123), done.\u001b[K\n",
- "remote: Total 142 (delta 21), reused 127 (delta 12), pack-reused 0\u001b[K\n",
- "Receiving objects: 100% (142/142), 22.05 MiB | 39.74 MiB/s, done.\n",
- "Resolving deltas: 100% (21/21), done.\n"
- ]
- }
- ],
- "source": [
- "! git clone https://github.com/KenjieDec/GPEN GPEN"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "t8AU9f7U-vKz",
- "outputId": "b2489822-858c-4e26-82c4-9da16573501c"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "--2021-06-13 05:26:36-- https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/RetinaFace-R50.pth\n",
- "Resolving public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)... 47.101.88.25\n",
- "Connecting to public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)|47.101.88.25|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 109497761 (104M) [application/octet-stream]\n",
- "Saving to: ‘RetinaFace-R50.pth’\n",
- "\n",
- "RetinaFace-R50.pth 100%[===================>] 104.42M 9.53MB/s in 17s \n",
- "\n",
- "2021-06-13 05:26:55 (6.17 MB/s) - ‘RetinaFace-R50.pth’ saved [109497761/109497761]\n",
- "\n",
- "--2021-06-13 05:26:55-- https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-512.pth\n",
- "Resolving public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)... 47.101.88.25\n",
- "Connecting to public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)|47.101.88.25|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 284085738 (271M) [application/octet-stream]\n",
- "Saving to: ‘GPEN-512.pth’\n",
- "\n",
- "GPEN-512.pth 100%[===================>] 270.92M 8.48MB/s in 43s \n",
- "\n",
- "2021-06-13 05:27:40 (6.30 MB/s) - ‘GPEN-512.pth’ saved [284085738/284085738]\n",
- "\n",
- "--2021-06-13 05:27:40-- https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-1024-Color.pth\n",
- "Resolving public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)... 47.101.88.25\n",
- "Connecting to public-vigen-video.oss-cn-shanghai.aliyuncs.com (public-vigen-video.oss-cn-shanghai.aliyuncs.com)|47.101.88.25|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 284914645 (272M) [application/octet-stream]\n",
- "Saving to: ‘GPEN-1024-Color.pth’\n",
- "\n",
- "GPEN-1024-Color.pth 100%[===================>] 271.71M 9.09MB/s in 42s \n",
- "\n",
- "2021-06-13 05:28:25 (6.43 MB/s) - ‘GPEN-1024-Color.pth’ saved [284914645/284914645]\n",
- "\n"
- ]
- }
- ],
- "source": [
- "!wget \"https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/RetinaFace-R50.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116085&Signature=GlUNW6%2B8FxvxWmE9jKIZYOOciKQ%3D\" -O GPEN/weights/RetinaFace-R50.pth\n",
- "!wget \"https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-512.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116208&Signature=hBgvVvKVSNGeXqT8glG%2Bd2t2OKc%3D\" -O GPEN/weights/GPEN-512.pth\n",
- "!wget \"https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Colorization-1024.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116315&Signature=9tPavW2h%2F1LhIKiXj73sTQoWqcc%3D\" -O GPEN/weights/GPEN-1024-Color.pth "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "Zfc5z0CyAVKY",
- "outputId": "98dc0eab-167c-4415-96f5-9e8af5bc2a6e"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Collecting torch==1.7.1\n",
- "\u001b[?25l Downloading https://files.pythonhosted.org/packages/90/5d/095ddddc91c8a769a68c791c019c5793f9c4456a688ddd235d6670924ecb/torch-1.7.1-cp37-cp37m-manylinux1_x86_64.whl (776.8MB)\n",
- "\u001b[K |████████████████████████████████| 776.8MB 24kB/s \n",
- "\u001b[?25hCollecting torchvision==0.8.2\n",
- "\u001b[?25l Downloading https://files.pythonhosted.org/packages/94/df/969e69a94cff1c8911acb0688117f95e1915becc1e01c73e7960a2c76ec8/torchvision-0.8.2-cp37-cp37m-manylinux1_x86_64.whl (12.8MB)\n",
- "\u001b[K |████████████████████████████████| 12.8MB 264kB/s \n",
- "\u001b[?25hCollecting torchaudio==0.7.2\n",
- "\u001b[?25l Downloading https://files.pythonhosted.org/packages/37/16/ecdb9eb09ec6b8133d6c9536ea9e49cd13c9b5873c8488b8b765a39028da/torchaudio-0.7.2-cp37-cp37m-manylinux1_x86_64.whl (7.6MB)\n",
- "\u001b[K |████████████████████████████████| 7.6MB 18.2MB/s \n",
- "\u001b[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1) (1.19.5)\n",
- "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1) (3.7.4.3)\n",
- "Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.7/dist-packages (from torchvision==0.8.2) (7.1.2)\n",
- "\u001b[31mERROR: torchtext 0.9.1 has requirement torch==1.8.1, but you'll have torch 1.7.1 which is incompatible.\u001b[0m\n",
- "Installing collected packages: torch, torchvision, torchaudio\n",
- " Found existing installation: torch 1.8.1+cu101\n",
- " Uninstalling torch-1.8.1+cu101:\n",
- " Successfully uninstalled torch-1.8.1+cu101\n",
- " Found existing installation: torchvision 0.9.1+cu101\n",
- " Uninstalling torchvision-0.9.1+cu101:\n",
- " Successfully uninstalled torchvision-0.9.1+cu101\n",
- "Successfully installed torch-1.7.1 torchaudio-0.7.2 torchvision-0.8.2\n",
- "Requirement already satisfied: pip in /usr/local/lib/python3.7/dist-packages (19.3.1)\n",
- "Requirement already satisfied: install in /usr/local/lib/python3.7/dist-packages (1.3.4)\n",
- "Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (4.1.2.30)\n",
- "Requirement already satisfied: numpy>=1.14.5 in /usr/local/lib/python3.7/dist-packages (from opencv-python) (1.19.5)\n"
- ]
- }
- ],
- "source": [
- "!pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2\n",
- "!pip install pip install opencv-python"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "BlSfEFXgnAHu",
- "outputId": "6502bfec-2efd-4e36-fef6-4fc354132ea2"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "/content/GPEN\n"
- ]
- }
- ],
- "source": [
- "%cd GPEN"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "dt6hIEjyx7Nt",
- "outputId": "13de37fd-6dca-437f-c892-be89560a83d6"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "--2021-06-13 05:31:09-- https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip\n",
- "Resolving github.com (github.com)... 140.82.113.3\n",
- "Connecting to github.com (github.com)|140.82.113.3|:443... connected.\n",
- "HTTP request sent, awaiting response... 302 Found\n",
- "Location: https://github-releases.githubusercontent.com/1335132/d2f252e2-9801-11e7-9fbf-bc7b4e4b5c83?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210613%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210613T053109Z&X-Amz-Expires=300&X-Amz-Signature=0297051ab9ba7cd75b20f001e6392f931bd0fbde6cea59a28c6b3bb7fdb97b78&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=1335132&response-content-disposition=attachment%3B%20filename%3Dninja-linux.zip&response-content-type=application%2Foctet-stream [following]\n",
- "--2021-06-13 05:31:09-- https://github-releases.githubusercontent.com/1335132/d2f252e2-9801-11e7-9fbf-bc7b4e4b5c83?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210613%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210613T053109Z&X-Amz-Expires=300&X-Amz-Signature=0297051ab9ba7cd75b20f001e6392f931bd0fbde6cea59a28c6b3bb7fdb97b78&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=1335132&response-content-disposition=attachment%3B%20filename%3Dninja-linux.zip&response-content-type=application%2Foctet-stream\n",
- "Resolving github-releases.githubusercontent.com (github-releases.githubusercontent.com)... 185.199.108.154, 185.199.109.154, 185.199.110.154, ...\n",
- "Connecting to github-releases.githubusercontent.com (github-releases.githubusercontent.com)|185.199.108.154|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 77854 (76K) [application/octet-stream]\n",
- "Saving to: ‘ninja-linux.zip’\n",
- "\n",
- "ninja-linux.zip 100%[===================>] 76.03K --.-KB/s in 0.002s \n",
- "\n",
- "2021-06-13 05:31:09 (32.1 MB/s) - ‘ninja-linux.zip’ saved [77854/77854]\n",
- "\n",
- "Archive: ninja-linux.zip\n",
- " inflating: /usr/local/bin/ninja \n",
- "update-alternatives: using /usr/local/bin/ninja to provide /usr/bin/ninja (ninja) in auto mode\n"
- ]
- }
- ],
- "source": [
- "!wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip\n",
- "!sudo unzip ninja-linux.zip -d /usr/local/bin/\n",
- "!sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "MHS0GTSK1hnf"
- },
- "source": [
- "# Main Code\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "NOo9qIPb1Ivn",
- "outputId": "268e81d3-bb69-42c0-a5ca-704cce5b37a8"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "0 Solvay_conference_1927.png\n"
- ]
- }
- ],
- "source": [
- "!python face_enhancement.py"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "XeAs7bPg1KpZ",
- "outputId": "a9e0b3de-d00b-4329-b079-2ed52a22f58a"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "0 examples/grays/106000_gray.png\n"
- ]
- }
- ],
- "source": [
- "!python face_colorization.py"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "WDCm7xJee4c2"
- },
- "source": [
- "# Download multiple images all at once"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6zC7BIgHe-Z2"
- },
- "source": [
- "download on outs"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "4tAqGLPpe0Ez"
- },
- "outputs": [],
- "source": [
- "!zip -r /content/GPEN/examples/outs.zip /content/GPEN/examples/outs"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0W7jMJlqfD2R"
- },
- "source": [
- "download on couts"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "B3-JMXM3e0wT"
- },
- "outputs": [],
- "source": [
- "!zip -r /content/GPEN/examples/couts.zip /content/GPEN/examples/couts"
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "name": "GPEN.ipynb",
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "name": "python"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
- }
app.py CHANGED
@@ -1,9 +1,12 @@
 import os
-
+
 os.system('wget "https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/RetinaFace-R50.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116085&Signature=GlUNW6%2B8FxvxWmE9jKIZYOOciKQ%3D" -O weights/RetinaFace-R50.pth')
 os.system('wget "https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-BFR-512.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116208&Signature=hBgvVvKVSNGeXqT8glG%2Bd2t2OKc%3D" -O weights/GPEN-512.pth')
 os.system('wget "https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Colorization-1024.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116315&Signature=9tPavW2h%2F1LhIKiXj73sTQoWqcc%3D" -O weights/GPEN-1024-Color.pth ')
 os.system('wget "https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/realesrnet_x2.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1962694780&Signature=lI%2FolhA%2FyigiTRvoDIVbtMIyhjI%3D" -O weights/realesrnet_x2.pth ')
+ os.system('wget "https://public-vigen-video.oss-cn-shanghai.aliyuncs.com/robin/models/GPEN-Inpainting-1024.pth?OSSAccessKeyId=LTAI4G6bfnyW4TA4wFUXTYBe&Expires=1961116338&Signature=tvYhdLaLgW7UdcUrApXp2jsek8w%3D" -O weights/GPEN-Inpainting-1024.pth ')
+ jksp= os.environ['GPEN-BFR-2048']
+ os.system(f'wget "{jksp}" -O weights/GPEN-BFR-2048.pth')

 import gradio as gr

@@ -13,9 +16,66 @@ import gradio as gr
 '''
 import os
 import cv2
+ import glob
+ import time
+ import math
+ import argparse
+ import numpy as np
+ from PIL import Image, ImageDraw
+ import __init_paths
 from face_enhancement import FaceEnhancement
 from face_colorization import FaceColorization
+ from face_inpainting import FaceInpainting

+ def brush_stroke_mask(img, color=(255,255,255)):
+     min_num_vertex = 8
+     max_num_vertex = 28
+     mean_angle = 2*math.pi / 5
+     angle_range = 2*math.pi / 15
+     min_width = 12
+     max_width = 80
+     def generate_mask(H, W, img=None):
+         average_radius = math.sqrt(H*H+W*W) / 8
+         mask = Image.new('RGB', (W, H), 0)
+         if img is not None: mask = img #Image.fromarray(img)
+
+         for _ in range(np.random.randint(1, 4)):
+             num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
+             angle_min = mean_angle - np.random.uniform(0, angle_range)
+             angle_max = mean_angle + np.random.uniform(0, angle_range)
+             angles = []
+             vertex = []
+             for i in range(num_vertex):
+                 if i % 2 == 0:
+                     angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
+                 else:
+                     angles.append(np.random.uniform(angle_min, angle_max))
+
+             h, w = mask.size
+             vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
+             for i in range(num_vertex):
+                 r = np.clip(
+                     np.random.normal(loc=average_radius, scale=average_radius//2),
+                     0, 2*average_radius)
+                 new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
+                 new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
+                 vertex.append((int(new_x), int(new_y)))
+
+             draw = ImageDraw.Draw(mask)
+             width = int(np.random.uniform(min_width, max_width))
+             draw.line(vertex, fill=color, width=width)
+             for v in vertex:
+                 draw.ellipse((v[0] - width//2,
+                               v[1] - width//2,
+                               v[0] + width//2,
+                               v[1] + width//2),
+                              fill=color)
+
+         return mask
+
+     width, height = img.size
+     mask = generate_mask(height, width, img)
+     return mask

 def inference(file, mode):

@@ -25,18 +85,34 @@ def inference(file, mode):
     img, orig_faces, enhanced_faces = faceenhancer.process(im)
     cv2.imwrite(os.path.join("e.png"), img)

+
     if mode == "enhance":
         return os.path.join("e.png")
     elif mode == "colorize":
         model = {'name':'GPEN-1024-Color', 'size':1024}
         grayf = cv2.imread("e.png", cv2.IMREAD_GRAYSCALE)
-         grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR) # channel: 1->3
+         grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR)
         facecolorizer = FaceColorization(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
         colorf = facecolorizer.process(grayf)

         colorf = cv2.resize(colorf, (grayf.shape[1], grayf.shape[0]))
         cv2.imwrite(os.path.join("output.png"), colorf)
         return os.path.join("output.png")
+     elif mode == "inpainting":
+         model = {'name':'GPEN-Inpainting-1024', 'size':1024}
+         faceinpainter = FaceInpainting(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
+         im = np.asarray(brush_stroke_mask(Image.fromarray(im)))
+         inpaint = faceinpainter.process(im)
+
+         cv2.imwrite(os.path.join("output.png"), inpaint)
+         return os.path.join("output.png")
+     elif mode == "selfie":
+         model = {'name':'GPEN-BFR-2048', 'size':2048}
+         im = cv2.resize(im, (0,0), fx=4, fy=4)
+         faceenhancer = FaceEnhancement(size=model['size'], model=model['name'], channel_multiplier=2, device='cpu')
+         img, orig_faces, enhanced_faces = faceenhancer.process(im)
+         cv2.imwrite(os.path.join("output.png"), img)
+         return os.path.join("output.png")
     else:
         faceenhancer = FaceEnhancement(size=512, model='GPEN-512', channel_multiplier=2, device='cpu', u=True)
         img, orig_faces, enhanced_faces = faceenhancer.process(im)

@@ -52,14 +128,16 @@ article = "<p style='text-align: center;'><a href='https://arxiv.org/abs/2105.06

 gr.Interface(
     inference,
-     [gr.inputs.Image(type="filepath", label="Input"),gr.inputs.Radio(["enhance", "colorize", "enhanced+background"], type="value", default="enhance", label="Type")],
+     [gr.inputs.Image(type="filepath", label="Input"),gr.inputs.Radio(["enhance", "colorize", "inpainting", "selfie", "enhanced+background"], type="value", default="enhance", label="Type")],
     gr.outputs.Image(type="file", label="Output"),
     title=title,
     description=description,
     article=article,
     examples=[
-         ['enhance.png', 'Enhance'],
-         ['color.png', 'Colorization']
+         ['enhance.png', 'enhance'],
+         ['color.png', 'colorize'],
+         ['inpainting.png', 'inpainting'],
+         ['selfie.png', 'selfie']
     ],
     enable_queue=True
 ).launch()
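
The two new modes reuse the existing pattern in inference(): save the upload, run the chosen GPEN model, and return an output path. The following is a minimal sketch of what the new branches do outside the Gradio UI; it assumes the repo modules and downloaded weights/ are available, that an aligned test image of roughly 1024x1024 pixels named face.png exists, and it uses a plain rectangle as a simplified stand-in for app.py's brush_stroke_mask. None of these file names are part of this commit.

# Sketch only; file names and the rectangle mask are illustrative assumptions.
import cv2
from face_enhancement import FaceEnhancement
from face_inpainting import FaceInpainting

im = cv2.imread("face.png", cv2.IMREAD_COLOR)

# "inpainting": blank out part of the face, then let GPEN-Inpainting-1024 complete it
masked = im.copy()
cv2.rectangle(masked, (300, 300), (600, 600), (255, 255, 255), -1)  # stand-in for brush_stroke_mask
inpainter = FaceInpainting(size=1024, model='GPEN-Inpainting-1024', channel_multiplier=2, device='cpu')
cv2.imwrite("inpaint_out.png", inpainter.process(masked))

# "selfie": upscale 4x and restore with the 2048 model, as inference() does
enhancer = FaceEnhancement(size=2048, model='GPEN-BFR-2048', channel_multiplier=2, device='cpu')
big = cv2.resize(im, (0, 0), fx=4, fy=4)
out, orig_faces, enhanced_faces = enhancer.process(big)
cv2.imwrite("selfie_out.png", out)
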
face_colorization.py CHANGED
@@ -21,31 +21,3 @@ class FaceColorization(object):
         out = self.facegan.process(gray)

         return out
-
-
- if __name__=='__main__':
-     model = {'name':'GPEN-1024-Color', 'size':1024}
-
-     indir = 'examples/grays'
-     outdir = 'examples/couts'
-     os.makedirs(outdir, exist_ok=True)
-
-     facecolorizer = FaceColorization(size=model['size'], model=model['name'])
-
-     files = sorted(glob.glob(os.path.join(indir, '*.*g')))
-     for n, file in enumerate(files[:]):
-         filename = os.path.basename(file)
-
-         grayf = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
-         grayf = cv2.cvtColor(grayf, cv2.COLOR_GRAY2BGR) # channel: 1->3
-
-         colorf = facecolorizer.process(grayf)
-
-         colorf = cv2.resize(colorf, (grayf.shape[1], grayf.shape[0]))
-
-         cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'.jpg'), grayf)
-         cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'_COMP.jpg'), np.hstack((grayf, colorf)))
-         cv2.imwrite(os.path.join(outdir, '.'.join(filename.split('.')[:-1])+'_GPEN.jpg'), colorf)
-
-         if n%10==0: print(n, file)
-
face_inpainting.py ADDED
@@ -0,0 +1,18 @@
+ '''
+ @paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
+ @author: yangxy (yangtao9009@gmail.com)
+ '''
+ from face_model.face_gan import FaceGAN
+
+ class FaceInpainting(object):
+     def __init__(self, base_dir='./', size=1024, out_size=1024, model=None, channel_multiplier=2, narrow=1, key=None, device='cuda'):
+         self.facegan = FaceGAN(base_dir, size, out_size, model, channel_multiplier, narrow, key, device=device)
+
+     # make sure the face image is well aligned. Please refer to face_enhancement.py
+     def process(self, brokenf, aligned=True):
+         # complete the face
+         out = self.facegan.process(brokenf)
+
+         return out
+
+
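
FaceInpainting mirrors FaceColorization: a thin wrapper that forwards an aligned, partially masked face to FaceGAN. A batch driver in the spirit of the __main__ block removed from face_colorization.py might look like the sketch below; the examples/broken and examples/iouts directory names are assumptions, not part of this commit, and the inpainting weights must already be in weights/.

# Sketch only; directory names are assumed, weights/GPEN-Inpainting-1024.pth must already exist.
import os
import glob
import cv2
from face_inpainting import FaceInpainting

if __name__ == '__main__':
    model = {'name': 'GPEN-Inpainting-1024', 'size': 1024}

    indir = 'examples/broken'
    outdir = 'examples/iouts'
    os.makedirs(outdir, exist_ok=True)

    faceinpainter = FaceInpainting(size=model['size'], model=model['name'])

    for n, file in enumerate(sorted(glob.glob(os.path.join(indir, '*.*g')))):
        brokenf = cv2.imread(file, cv2.IMREAD_COLOR)

        # complete the masked regions, then resize back to the input resolution
        completed = faceinpainter.process(brokenf)
        completed = cv2.resize(completed, (brokenf.shape[1], brokenf.shape[0]))

        outname = os.path.basename(file).rsplit('.', 1)[0] + '_GPEN.jpg'
        cv2.imwrite(os.path.join(outdir, outname), completed)
        if n % 10 == 0: print(n, file)
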
face_model/face_gan.py CHANGED
@@ -19,7 +19,7 @@ class FaceGAN(object):
         self.device = device
         self.is_norm = is_norm
         self.in_resolution = size
-         self.out_resolution = size if out_size==None else out_size
+         self.out_resolution = size if out_size == None else out_size
         self.key = key
         self.load_model(channel_multiplier, narrow)

@@ -40,6 +40,7 @@ class FaceGAN(object):

         with torch.no_grad():
             out, __ = self.model(img_t)
+         del img_t

         out = self.tensor2img(out)

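
The del img_t added after the forward pass drops the reference to the input tensor before the output is converted back to an image, which can trim peak memory on memory-constrained hosts. The snippet below is a generic illustration of that pattern, not code from this repository.

# Generic illustration (not repository code): free the input tensor once the
# forward pass is done, so only the output stays resident during post-processing.
import torch

def forward_once(model, img_t):
    with torch.no_grad():
        out, _ = model(img_t)
    del img_t  # drops this function's reference; the tensor is freed if no caller still holds it
    return out
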
inpainting.png ADDED

Git LFS Details

  • SHA256: b7ea07341a37da1fb4c47c49b41f0b697be9865a6aaa5e8bd3a1cf2e0cd3b016
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
requirements.txt CHANGED
@@ -4,4 +4,6 @@ torchvision
 opencv-python
 numpy
 scikit-image
- pillow
+ pillow
+ scikit-learn
+ joblib
selfie.png ADDED