sichaolong committed on
Commit
4baf7bf
1 Parent(s): 4f0aaa3

Upload 97 files

Files changed (50 shown; this view is limited to 50 files because the commit contains too many changes)
  1. README.md +4 -6
  2. __init__.py +11 -0
  3. app.py +53 -0
  4. app/build/asset-manifest.json +16 -0
  5. app/build/index.html +18 -0
  6. app/build/static/css/main.e24c9a9b.css +1 -0
  7. app/build/static/js/main.a2cdd7a2.js +0 -0
  8. app/build/static/js/main.a2cdd7a2.js.LICENSE.txt +60 -0
  9. app/build/static/js/main.ca662570.js +0 -0
  10. app/build/static/js/main.ca662570.js.LICENSE.txt +60 -0
  11. app/build/static/js/main.ed69b879.js +0 -0
  12. app/build/static/js/main.ed69b879.js.LICENSE.txt +60 -0
  13. app/build/static/media/WorkSans-Black.67c2c5a144333953880b.ttf +0 -0
  14. app/build/static/media/WorkSans-Bold.2bea7a7f7d052c74da25.ttf +0 -0
  15. app/build/static/media/WorkSans-Regular.bb287b894b27372d8ea7.ttf +0 -0
  16. app/build/static/media/WorkSans-SemiBold.1e98db4eb705b586728e.ttf +0 -0
  17. app/build/static/media/coffee-machine-lineal.ee32631219cc3986f861.gif +0 -0
  18. benchmark.py +108 -0
  19. const.py +80 -0
  20. ext/__init__.py +1 -0
  21. ext/__pycache__/__init__.cpython-38.pyc +0 -0
  22. ext/__pycache__/__init__.cpython-39.pyc +0 -0
  23. ext/__pycache__/image_watermark_handler.cpython-38.pyc +0 -0
  24. ext/__pycache__/image_watermark_handler.cpython-39.pyc +0 -0
  25. ext/image_watermark_handler.py +93 -0
  26. ext/request_info.txt +44 -0
  27. ext/test.py +341 -0
  28. file_manager/__init__.py +1 -0
  29. file_manager/__pycache__/__init__.cpython-38.pyc +0 -0
  30. file_manager/__pycache__/__init__.cpython-39.pyc +0 -0
  31. file_manager/__pycache__/file_manager.cpython-38.pyc +0 -0
  32. file_manager/__pycache__/file_manager.cpython-39.pyc +0 -0
  33. file_manager/__pycache__/storage_backends.cpython-38.pyc +0 -0
  34. file_manager/__pycache__/storage_backends.cpython-39.pyc +0 -0
  35. file_manager/__pycache__/utils.cpython-38.pyc +0 -0
  36. file_manager/__pycache__/utils.cpython-39.pyc +0 -0
  37. file_manager/file_manager.py +264 -0
  38. file_manager/storage_backends.py +46 -0
  39. file_manager/utils.py +67 -0
  40. helper.py +284 -0
  41. interactive_seg.py +203 -0
  42. make_gif.py +125 -0
  43. model/__init__.py +0 -0
  44. model/__pycache__/__init__.cpython-38.pyc +0 -0
  45. model/__pycache__/__init__.cpython-39.pyc +0 -0
  46. model/__pycache__/base.cpython-38.pyc +0 -0
  47. model/__pycache__/base.cpython-39.pyc +0 -0
  48. model/__pycache__/fcf.cpython-38.pyc +0 -0
  49. model/__pycache__/instruct_pix2pix.cpython-38.pyc +0 -0
  50. model/__pycache__/lama.cpython-38.pyc +0 -0
README.md CHANGED
@@ -1,11 +1,9 @@
  ---
  title: Lama Cleaner Demo
- emoji: 📈
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 3.23.0
- app_file: app.py
+ emoji: 👀
+ colorFrom: purple
+ colorTo: purple
+ sdk: docker
  pinned: false
  license: apache-2.0
  ---
__init__.py ADDED
@@ -0,0 +1,11 @@
+ import warnings
+ warnings.simplefilter("ignore", UserWarning)
+
+ from parse_args import parse_args
+
+ def entry_point():
+     args = parse_args()
+     # Import here so that os.environ["XDG_CACHE_HOME"] = args.model_cache_dir takes effect for diffusers:
+     # https://github.com/huggingface/diffusers/blob/be99201a567c1ccd841dc16fb24e88f7f239c187/src/diffusers/utils/constants.py#L18
+     from lama_cleaner.server import main
+     main(args)
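For context, a minimal sketch (not part of this commit) of why the import above is deferred: the cache directory is resolved from XDG_CACHE_HOME at import time, so the variable must be set before lama_cleaner.server (which pulls in diffusers) is loaded. The cache path here is hypothetical.

    import os
    os.environ["XDG_CACHE_HOME"] = "/data/model-cache"  # hypothetical cache dir, set first

    # Imported only now, so the model cache resolves under /data/model-cache.
    from lama_cleaner.server import main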
app.py ADDED
@@ -0,0 +1,53 @@
+ from typing import List
+
+ from pydantic import BaseModel
+ from server import main
+ """
+ LAMA Cleaner is an image cleanup (inpainting) tool: it removes unwanted content from an image using the LaMa (Large Mask inpainting) model.
+ When using LAMA Cleaner you need to provide a mask image that marks which regions should be cleaned and which should be left untouched.
+ The mask can be an image file in any common format, e.g. PNG, JPEG, or BMP.
+ You can create the mask with any image editor (e.g. Photoshop or GIMP), painting it in black and white: white marks the regions to clean (inpaint), black marks the regions to keep.
+
+ Steps to create a mask image:
+
+ 1. Open the image you want to clean together with a blank image.
+ 2. On the blank image, use a brush tool to paint white over the regions to clean and leave the regions to keep black.
+ 3. Save the mask as PNG, JPEG, or BMP.
+ 4. When running LAMA Cleaner, pass the mask image as an input parameter.
+ Note that the quality of the mask strongly affects the result, so it is worth spending some time on an accurate mask.
+
+
+ Input image: the original image to clean.
+ Mask image: the mask marking which regions to clean and which to keep, in any common format (PNG, JPEG, BMP), painted in black and white as described above.
+ Block size: size of the blocks used to compute local statistics. Larger blocks can improve the result but increase computation time.
+ Block step: stride used when scanning blocks. A smaller step can improve the result but increases computation time.
+ Search window size: size of the window used to search for the best matching block. A larger window can improve the result but increases computation time.
+ Similarity threshold: threshold for accepting the best matching block. A smaller threshold can improve the result but increases computation time.
+ Cleaning strength: how aggressively the image is cleaned. Higher values clean more but may lose image detail.
+ Note that the best values for these parameters depend on your images and use case; tune them to your actual scenario.
+ """
+ class FakeArgs(BaseModel):
+     host: str = "127.0.0.1"
+     port: int = 7860
+     model: str = 'lama'  # model to use
+     hf_access_token: str = ""
+     sd_disable_nsfw: bool = False  # disable the Stable Diffusion NSFW checker
+     sd_cpu_textencoder: bool = True  # always run the Stable Diffusion text encoder on CPU
+     sd_run_local: bool = False
+     sd_enable_xformers: bool = False
+     local_files_only: bool = False
+     cpu_offload: bool = False
+     device: str = "cpu"  # cuda / cpu / mps
+     gui: bool = False
+     gui_size: List[int] = [1000, 1000]
+     input: str = ''
+     disable_model_switch: bool = False
+     debug: bool = False
+     no_half: bool = False
+     disable_nsfw: bool = False
+     enable_xformers: bool = False
+     model_dir: str = ""
+     output_dir: str = "resources"  # where uploaded files and results are stored
+
+ if __name__ == "__main__":
+     main(FakeArgs())
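To make the mask convention in the docstring concrete, here is a minimal sketch (not part of the commit; the file name and coordinates are hypothetical) that builds a mask programmatically instead of painting one in an editor:

    import cv2
    import numpy as np

    img = cv2.imread("input.png")                   # image to clean
    mask = np.zeros(img.shape[:2], dtype=np.uint8)  # all black: keep everything
    mask[40:120, 300:520] = 255                     # white: region to clean
    cv2.imwrite("input_mask.png", mask)             # feed this mask to LAMA Cleaner

Since FakeArgs is a pydantic BaseModel, individual fields can also be overridden at construction time, e.g. main(FakeArgs(device="cuda", port=8080)).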
app/build/asset-manifest.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "files": {
+     "main.css": "/static/css/main.e24c9a9b.css",
+     "main.js": "/static/js/main.ca662570.js",
+     "static/media/coffee-machine-lineal.gif": "/static/media/coffee-machine-lineal.ee32631219cc3986f861.gif",
+     "static/media/WorkSans-SemiBold.ttf": "/static/media/WorkSans-SemiBold.1e98db4eb705b586728e.ttf",
+     "static/media/WorkSans-Bold.ttf": "/static/media/WorkSans-Bold.2bea7a7f7d052c74da25.ttf",
+     "static/media/WorkSans-Regular.ttf": "/static/media/WorkSans-Regular.bb287b894b27372d8ea7.ttf",
+     "static/media/WorkSans-Black.ttf": "/static/media/WorkSans-Black.67c2c5a144333953880b.ttf",
+     "index.html": "/index.html"
+   },
+   "entrypoints": [
+     "static/css/main.e24c9a9b.css",
+     "static/js/main.ca662570.js"
+   ]
+ }
app/build/index.html ADDED
@@ -0,0 +1,18 @@
+ <!doctype html>
+ <html lang="en">
+ <head>
+ <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate"/>
+ <meta http-equiv="Pragma" content="no-cache"/>
+ <meta http-equiv="Expires" content="0"/>
+ <meta charset="utf-8"/>
+ <meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=0"/>
+ <meta name="theme-color" content="#ffffff"/>
+ <title>lama-cleaner - Image inpainting powered by SOTA AI model</title>
+ <script defer="defer" src="/static/js/main.ca662570.js"></script>
+ <link href="/static/css/main.e24c9a9b.css" rel="stylesheet">
+ </head>
+ <body>
+ <noscript>You need to enable JavaScript to run this app.</noscript>
+ <div id="root"></div>
+ </body>
+ </html>
app/build/static/css/main.e24c9a9b.css ADDED
@@ -0,0 +1 @@
+ :root{--blackA1:rgba(0,0,0,.012);--blackA2:rgba(0,0,0,.027);--blackA3:rgba(0,0,0,.047);--blackA4:rgba(0,0,0,.071);--blackA5:rgba(0,0,0,.09);--blackA6:rgba(0,0,0,.114);--blackA7:rgba(0,0,0,.141);--blackA8:rgba(0,0,0,.22);--blackA9:rgba(0,0,0,.439);--blackA10:rgba(0,0,0,.478);--blackA11:rgba(0,0,0,.565);--blackA12:rgba(0,0,0,.91);--mauve1:#fdfcfd;--mauve2:#f9f8f9;--mauve3:#f4f2f4;--mauve4:#eeedef;--mauve5:#e9e8ea;--mauve6:#e4e2e4;--mauve7:#dcdbdd;--mauve8:#c8c7cb;--mauve9:#908e96;--mauve10:#86848d;--mauve11:#6f6e77;--mauve12:#1a1523;--violet1:#fdfcfe;--violet2:#fbfaff;--violet3:#f5f2ff;--violet4:#ede9fe;--violet5:#e4defc;--violet6:#d7cff9;--violet7:#c4b8f3;--violet8:#aa99ec;--violet9:#6e56cf;--violet10:#644fc1;--violet11:#5746af;--violet12:#20134b;--page-bg:#fff;--page-bg-light:hsla(0,0%,100%,.5);--page-text-color:#040404;--yellow-accent:#fc0;--yellow-accent-light:#ffcc0055;--link-color:#000;--border-color:#eff1f4;--border-color-light:hsla(240,9%,43%,.5);--tooltip-bg:#e6e6ea;--tooltip-text-color:#000;--error-color:#ef4444;--success-color:#10b981;--editor-toolkit-bg:hsla(0,0%,100%,.5);--editor-options-bg:#e6e6ea;--options-text-color:var(--page-text-color);--editor-size-border-color:var(--border-color);--editor-toolkit-panel-border:0;--modal-bg:var(--page-bg);--modal-text-color:#000;--modal-hotkey-border-color:#000;--model-mask-bg:rgba(209,213,219,.4);--text-color:#040404;--text-color-gray:#6b6f76;--btn-text-color:var(--text-color);--btn-text-hover-color:#040404;--btn-border-color:#646478;--btn-primary-hover-bg:var(--yellow-accent);--animation-pulsing-bg:hsla(0,0%,100%,.5);--switch-root-background-color:#dfe1e4;--switch-thumb-color:var(--page-bg);--switch-thumb-checked-color:var(--page-bg);--slider-background-color:var(--switch-root-background-color);--tooltip-bg:var(--page-bg);--badge-background-color:#f1f3f5;--badge-color:#687076;--box-shadow:inset 0 0.5px hsla(0,0%,100%,.1),inset 0 1px 5px #f8f9fa,0px 0px 0px 0.5px #c1c8cd,0px 2px 1px -1px #c1c8cd,0 1px #c1c8cd;--croper-bg:rgba(0,0,0,.5);--tabs-active-color:#f0f3f9}@font-face{font-family:WorkSans;src:url(/static/media/WorkSans-Regular.bb287b894b27372d8ea7.ttf)}@font-face{font-family:WorkSans-Semibold;src:url(/static/media/WorkSans-SemiBold.1e98db4eb705b586728e.ttf)}@font-face{font-family:WorkSans-Bold;src:url(/static/media/WorkSans-Bold.2bea7a7f7d052c74da25.ttf)}@font-face{font-family:WorkSans-Black;src:url(/static/media/WorkSans-Black.67c2c5a144333953880b.ttf)}[data-theme=dark]{--page-bg:#040404;--page-bg-light:#04040488;--page-text-color:#f9f9f9;--yellow-accent:#fc0;--yellow-accent-light:#ffcc0055;--link-color:var(--yellow-accent);--border-color:#1e1e1e;--border-color-light:#666;--tooltip-bg:#212121;--tooltip-text-color:#d2d2d2;--editor-toolkit-bg:rgba(0,0,0,.5);--editor-options-bg:#212121;--options-text-color:var(--page-text-color);--editor-size-border-color:var(--yellow-accent);--editor-toolkit-panel-border:1px solid 
hsla(240,9%,43%,.4);--modal-bg:var(--page-bg);--modal-text-color:var(--page-text-color);--modal-hotkey-border-color:var(--page-text-color);--model-mask-bg:rgba(76,76,87,.4);--text-color:#fff;--text-color-gray:#c3c4c6;--btn-text-color:var(--text-color);--btn-text-hover-color:var(--page-bg);--btn-border-color:var(--yellow-accent);--btn-primary-hover-bg:var(--yellow-accent);--animation-pulsing-bg:#f0f0ff;--switch-root-background-color:#3c3f44;--switch-thumb-color:#1f2023;--switch-thumb-checked-color:#fff;--slider-background-color:var(--switch-root-background-color);--badge-background-color:#202425;--badge-color:#9ba1a6;--box-shadow:inset 0 0.5px hsla(0,0%,100%,.1),inset 0 1px 5px #1a1d1e,0px 0px 0px 0.5px #4c5155,0px 2px 1px -1px #4c5155,0 1px #4c5155;--croper-bg:rgba(0,0,0,.5);--tabs-active-color:#272831}@supports (color:hsl(0 0% 0%/0)){[data-theme=dark]{--tooltip-bg:#202425}}@-webkit-keyframes pulsing{0%{opacity:1}50%{background-color:hsla(0,0%,100%,.5);background-color:var(--animation-pulsing-bg);opacity:.75}to{opacity:1}}@keyframes pulsing{0%{opacity:1}50%{background-color:hsla(0,0%,100%,.5);background-color:var(--animation-pulsing-bg);opacity:.75}to{opacity:1}}@-webkit-keyframes opacityReveal{0%{opacity:0}to{opacity:1}}@keyframes opacityReveal{0%{opacity:0}to{opacity:1}}@-webkit-keyframes slideDown{0%{-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes slideDown{0%{-webkit-transform:translateY(-100%);transform:translateY(-100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideUp{0%{-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes slideUp{0%{-webkit-transform:translateY(100%);transform:translateY(100%)}to{-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideIn{0%{-webkit-transform:translateX(calc(100% + 25px));transform:translateX(calc(100% + 25px))}to{-webkit-transform:translateX(0);transform:translateX(0)}}@keyframes slideIn{0%{-webkit-transform:translateX(calc(100% + 25px));transform:translateX(calc(100% + 25px))}to{-webkit-transform:translateX(0);transform:translateX(0)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@-webkit-keyframes slideUpAndFade{0%{opacity:0;-webkit-transform:translateY(2px);transform:translateY(2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes slideUpAndFade{0%{opacity:0;-webkit-transform:translateY(2px);transform:translateY(2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@-webkit-keyframes slideDownAndFade{0%{opacity:0;-webkit-transform:translateY(-2px);transform:translateY(-2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}@keyframes 
slideDownAndFade{0%{opacity:0;-webkit-transform:translateY(-2px);transform:translateY(-2px)}to{opacity:1;-webkit-transform:translateY(0);transform:translateY(0)}}.lama-cleaner{background-color:#fff;background-color:var(--page-bg);color:#040404;color:var(--page-text-color);display:grid;grid-template-areas:"main-content";height:100vh;transition-duration:.2s;transition-property:background-color,color;transition-timing-function:repeat(2,ease-out);width:100vw}a{color:inherit;text-decoration:inherit}input:disabled{color:#6b6f76;color:var(--text-color-gray)}.editor-container{align-items:center;display:flex;height:100vh;justify-content:center;width:100vw}.react-transform-wrapper{display:grid!important;height:100%!important;width:100%!important}.editor-canvas-container{grid-row-gap:1rem;display:grid;grid-template-areas:"editor-content";row-gap:1rem}.editor-canvas{grid-area:editor-content;z-index:2}.original-image-container{display:grid;grid-area:editor-content;grid-template-areas:"original-image-content";pointer-events:none}.original-image-container img{grid-area:original-image-content}.original-image-container .editor-slider{background-color:#fc0;background-color:var(--yellow-accent);grid-area:original-image-content;height:100%;justify-self:end;transition:all .3s cubic-bezier(.4,0,.2,1);width:6px;z-index:2}.editor-canvas-loading{-webkit-animation:pulsing .75s infinite;animation:pulsing .75s infinite;pointer-events:none}.editor-toolkit-panel{align-items:center;-webkit-animation:slideUp .2s ease-out;animation:slideUp .2s ease-out;-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:hsla(0,0%,100%,.5);background-color:var(--page-bg-light);border:0;border:var(--editor-toolkit-panel-border);border-radius:3rem;bottom:.5rem;box-shadow:0 0 0 1px rgba(0,0,0,.102),0 3px 16px rgba(0,0,0,.078),0 2px 6px 1px rgba(0,0,0,.09);display:flex;gap:16px;justify-content:center;padding:.6rem 32px;position:fixed}@media screen and (max-width:767px){.editor-toolkit-panel{grid-template-areas:"toolkit-size-selector toolkit-size-selector" "toolkit-brush-slider toolkit-brush-slider" "toolkit-btns toolkit-btns";justify-items:center;padding:1rem 2rem;row-gap:2rem}}.editor-toolkit-panel .eyeicon-active{background-color:#fc0;background-color:var(--yellow-accent);color:#040404;color:var(--btn-text-hover-color)}.editor-brush-slider{grid-column-gap:1rem;align-items:center;-webkit-column-gap:1rem;column-gap:1rem;display:grid;grid-area:toolkit-brush-slider;grid-template-columns:repeat(2,-webkit-max-content);grid-template-columns:repeat(2,max-content);height:-webkit-max-content;height:max-content;-webkit-user-select:none;user-select:none}.editor-brush-slider input[type=range]{-webkit-appearance:none;appearance:none;background:transparent;border-color:transparent;color:transparent;cursor:pointer;width:100%}.editor-brush-slider input[type=range]:focus{outline:none}.editor-brush-slider input[type=range]::-webkit-slider-thumb{-webkit-appearance:none;background:#fc0;background:var(--yellow-accent);border:1px solid #000;border-radius:50%;height:1.2rem;margin-top:-.5rem;width:1.2rem;z-index:2}.editor-brush-slider input[type=range]::-webkit-slider-runnable-track{background:#dfe1e4;background:var(--slider-background-color);border-radius:2rem;height:.2rem}.editor-brush-slider input[type=range]::-moz-range-track{background:#dfe1e4;background:var(--slider-background-color);border-radius:2rem}.editor-brush-slider 
input[type=range]::-moz-range-progress{background:#fc0;background:var(--yellow-accent)}.editor-toolkit-btns{display:flex;gap:12px}.brush-shape{background-color:rgba(255,204,0,.733);border:1px solid #fc0;border:1px solid var(--yellow-accent);border-radius:50%;pointer-events:none;position:absolute}.file-manager-modal{color:#040404;color:var(--text-color);height:90%;width:80%}.react-photo-album.react-photo-album--columns{height:80vh}.react-photo-album--photo{border:1px solid transparent;border-radius:8px;transition:visibility .25s ease-in,-webkit-transform .25s;transition:transform .25s,visibility .25s ease-in;transition:transform .25s,visibility .25s ease-in,-webkit-transform .25s;-webkit-user-select:none;user-select:none}.react-photo-album--photo:hover{border:1px solid #eff1f4;border:1px solid var(--border-color);-webkit-transform:scale(1.03);transform:scale(1.03)}.ScrollAreaRoot{--scrollbar-size:10px;border-radius:4px;overflow:hidden}.ScrollAreaViewport{border-radius:inherit;height:100%;width:100%}.ScrollAreaScrollbar{display:flex;padding:2px;touch-action:none;transition:background .16s ease-out;-webkit-user-select:none;user-select:none}.ScrollAreaScrollbar:hover{background:var(--blackA8)}.ScrollAreaScrollbar[data-orientation=vertical]{width:var(--scrollbar-size)}.ScrollAreaScrollbar[data-orientation=horizontal]{flex-direction:column;height:var(--scrollbar-size)}.ScrollAreaThumb{background:var(--mauve10);border-radius:var(--scrollbar-size);flex:1 1;position:relative}.ScrollAreaThumb:before{content:"";height:100%;left:50%;min-height:44px;min-width:44px;position:absolute;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);width:100%}.ScrollAreaCorner{background:var(--blackA8)}.file-search-input{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:12px;height:32px;padding-left:30px;width:250px}.sort-btn-inactive svg{opacity:.5}button,fieldset,input{all:unset}.TabsRoot{align-self:flex-start;background-color:#fff;background-color:var(--page-bg);display:flex;flex-direction:column;gap:8px}.TabsList{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:12px;flex-direction:row;gap:6px;padding:4px}.TabsList,.TabsTrigger{background-color:#fff;background-color:var(--page-bg);display:flex;justify-content:flex-start}.TabsTrigger{align-items:center;border-radius:8px;color:#000;color:var(--modal-text-color);font-family:inherit;font-size:15px;line-height:1;padding:8px;-webkit-user-select:none;user-select:none}.TabsTrigger:hover,.TabsTrigger[data-state=active]{background-color:#f0f3f9;background-color:var(--tabs-active-color)}.TabsTrigger:focus{position:relative}.TabsContent{background-color:#fff;background-color:var(--page-bg);outline:none;width:100%}.TabsContent[data-state=active]{display:flex;flex-direction:column;gap:14px}.landing-page{grid-row-gap:2rem;display:grid;grid-auto-rows:-webkit-max-content;grid-auto-rows:max-content;justify-items:center;place-self:center;row-gap:2rem}@media screen and (max-width:767px){.landing-page{padding:1rem}}.landing-page h1{font-size:1.4rem;text-align:center}@media screen and (max-width:767px){.landing-page h1{font-size:1.2rem}}.landing-page a{color:#000;color:var(--link-color)}.landing-file-selector{display:grid}header{align-items:center;-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:hsla(0,0%,100%,.5);background-color:var(--page-bg-light);border-bottom:1px solid hsla(240,9%,43%,.2);display:flex;height:60px;justify-content:space-between;padding:1rem 
1.5rem;position:absolute;top:0;width:100%;z-index:20}.shortcuts{z-index:1}.header-icons-wrapper{gap:12px}.header-icons,.header-icons-wrapper{align-items:center;display:flex;justify-content:center;justify-self:end}.header-icons{gap:6px}.mask-preview{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:8px;margin-left:20px;margin-top:30px;max-height:400px;max-width:400px}.prompt-wrapper{display:flex;gap:12px}.prompt-wrapper input{all:unset;border-radius:.5rem;border-width:0;min-width:600px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:0 .8rem}.prompt-wrapper input:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.theme-toggle-ui{transition:all .2s ease-in;-webkit-user-select:none;user-select:none;z-index:10}.theme-toggle-ui .theme-btn{align-items:center;cursor:pointer;display:flex;justify-content:center;outline:none}.theme-toggle-ui .theme-btn svg{height:22px;width:22px}.modal-shortcuts{background-color:#fff;background-color:var(--modal-bg);box-shadow:0 0 20px rgba(0,0,40,.2);color:#000;color:var(--modal-text-color);grid-area:main-content}@media screen and (max-width:767px){.modal-shortcuts{-webkit-animation:slideDown .2s ease-out;animation:slideDown .2s ease-out;display:grid;height:auto;width:100%}}.shortcut-options{display:flex;flex-direction:row;gap:48px}.shortcut-options .shortcut-option{grid-column-gap:2rem;align-items:center;-webkit-column-gap:2rem;column-gap:2rem;display:grid;grid-template-columns:repeat(2,auto)}@media screen and (max-width:767px){.shortcut-options .shortcut-option{-webkit-column-gap:0;column-gap:0;row-gap:.6rem}}.shortcut-options .shortcut-key{background-color:#fff;background-color:var(--page-bg);border-radius:6px;box-shadow:inset 0 .5px hsla(0,0%,100%,.1),inset 0 1px 5px #f8f9fa,0 0 0 .5px #c1c8cd,0 2px 1px -1px #c1c8cd,0 1px #c1c8cd;box-shadow:var(--box-shadow);box-sizing:border-box;color:#000;color:var(--modal-text-color);font-family:inherit;font-weight:400;justify-self:end;line-height:1.5;padding-left:.5rem;padding-right:.5rem;text-shadow:0 0 1px hsla(0,0%,100%,.5);-webkit-user-select:none;user-select:none;white-space:nowrap;width:-webkit-max-content;width:max-content}@media screen and (max-width:767px){.shortcut-options .shortcut-key{padding:.2rem .4rem}}.shortcut-options .shortcut-description{font-size:.95rem;justify-self:start;text-align:left}@media screen and (max-width:767px){.shortcut-options .shortcut-description{justify-self:start;text-align:left;width:auto}}.shortcut-options-column{gap:12px;width:320px}.setting-block,.setting-block .option-desc,.shortcut-options-column{display:flex;flex-direction:column}.setting-block .option-desc{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.3rem;color:#6b6f76;color:var(--text-color-gray);gap:8px;margin-top:12px;padding:1rem}.setting-block .option-desc .sub-setting-block{color:#040404;color:var(--text-color)}.setting-block .option-desc svg{color:#6b6f76;color:var(--text-color-gray)}.setting-block-content{align-items:center;display:flex;gap:12rem;justify-content:space-between}.setting-block-content-v{align-items:flex-start;display:flex;flex-direction:column;gap:1rem;justify-content:flex-start}.setting-block-content-title{align-items:center;display:flex;flex-direction:row;gap:8px;justify-content:center}.setting-block-desc{color:#6b6f76;color:var(--text-color-gray);font-size:1rem;margin-top:8px}.hd-setting-block 
.inline-tip{color:#040404;color:var(--text-color);cursor:pointer;display:inline}.model-desc-link{border-radius:999px;color:#687076;color:var(--badge-color);display:flex;justify-items:center;padding-left:5px;padding-right:5px;text-decoration:none}.modal-setting{background-color:#fff;background-color:var(--modal-bg);box-shadow:0 0 20px rgba(0,0,40,.2);color:#000;color:var(--modal-text-color);width:680px}@media screen and (max-width:767px){.modal-setting{-webkit-animation:slideDown .2s ease-out;animation:slideDown .2s ease-out;display:grid;height:auto;margin-top:-11rem;width:100%}}.folder-path-block{display:flex;flex-direction:column;gap:12px}.folder-path{border-radius:6px;border-width:0;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:.3rem .5rem;width:95%}.folder-path:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.side-panel{border-color:#eff1f4;border-color:var(--border-color);border-radius:.8rem;border-style:solid;border-width:1px;padding:.3rem;position:absolute;right:1.5rem;top:68px;z-index:4}.side-panel-trigger{border:0;font-family:WorkSans,sans-serif;font-size:16px}.side-panel-content{background-color:#fff;background-color:var(--page-bg);border-color:#eff1f4;border-color:var(--border-color);border-radius:.8rem;border-style:solid;border-width:1px;color:#040404;color:var(--text-color);display:flex;flex-direction:column;font-family:WorkSans,sans-serif;font-size:14px;gap:12px;padding:1rem;position:relative;right:1.5rem;top:1rem;z-index:9}.side-panel-content .setting-block-content{gap:1rem}.negative-prompt{all:unset;border-radius:.5rem;border-width:0;max-width:200px;min-height:150px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:12px .8rem;width:100%}.negative-prompt:focus-visible{border-width:0;outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.negative-prompt:-webkit-input-placeholder{padding-top:10px}.negative-prompt:-moz-input-placeholder{padding-top:10px}.negative-prompt:-ms-input-placeholder{padding-top:10px}.resize-title-tile{color:#6b6f76;color:var(--text-color-gray);font-size:.5rem;width:86px}.crop-border{outline-color:#fc0;outline-color:var(--yellow-accent);outline-style:dashed}.info-bar{align-items:center;background-color:#fff;background-color:var(--page-bg);border:0;border:var(--editor-toolkit-panel-border);border-radius:9999px;box-shadow:0 0 0 1px rgba(0,0,0,.102),0 3px 16px rgba(0,0,0,.078),0 2px 6px 1px rgba(0,0,0,.09);color:#040404;color:var(--text-color);display:flex;font-size:1rem;gap:12px;justify-content:center;padding:.2rem .8rem;pointer-events:auto;position:absolute}.info-bar:hover{cursor:move}.croper-wrapper{height:100%;overflow:hidden;position:absolute;width:100%}.croper,.croper-wrapper{pointer-events:none;z-index:2}.croper{bottom:0;box-shadow:0 0 0 9999px rgba(0,0,0,.5);left:0;position:relative;right:0;top:0}.drag-bar{pointer-events:auto;position:absolute}.drag-bar.ord-top{cursor:ns-resize;height:12px;left:0;margin-top:-6px;top:0;width:100%}.drag-bar.ord-right{cursor:ew-resize;height:100%;margin-right:-6px;right:0;top:0;width:12px}.drag-bar.ord-bottom{bottom:0;cursor:ns-resize;height:12px;left:0;margin-bottom:-6px;width:100%}.drag-bar.ord-left{cursor:ew-resize;height:100%;left:0;margin-left:-6px;top:0;width:12px}.drag-handle{background-color:#ffcc0055;background-color:var(--yellow-accent-light);border:2px solid #fc0;border:2px solid 
var(--yellow-accent);content:"";display:block;height:12px;pointer-events:auto;position:absolute;width:12px;z-index:4}.drag-handle:hover{background-color:#fc0;background-color:var(--yellow-accent)}.drag-handle.ord-topleft{cursor:nw-resize;left:-7px;top:-7px}.drag-handle.ord-topright{cursor:ne-resize;right:-7px;top:-7px}.drag-handle.ord-bottomright{bottom:-7px;cursor:se-resize;right:-7px}.drag-handle.ord-bottomleft{bottom:-7px;cursor:sw-resize;left:-7px}.drag-handle.ord-bottom,.drag-handle.ord-top{cursor:ns-resize;left:calc(50% - 6px)}.drag-handle.ord-top{top:-7px}.drag-handle.ord-bottom{bottom:-7px}.drag-handle.ord-left,.drag-handle.ord-right{cursor:ew-resize;top:calc(50% - 6px)}.drag-handle.ord-left{left:-7px}.drag-handle.ord-right{right:-7px}.interactive-seg-wrapper{height:100%;overflow:hidden;pointer-events:none;position:absolute;width:100%;z-index:2}.interactive-seg-wrapper .click-item{border-radius:50%;height:8px;position:absolute;width:8px}.interactive-seg-wrapper .click-item-positive{background-color:rgba(21,215,121,.936);outline:6px solid rgba(98,255,179,.31)}.interactive-seg-wrapper .click-item-negative{background-color:rgba(237,49,55,.942);outline:6px solid rgba(255,89,95,.31)}.interactive-seg-confirm-actions{background-color:#fff;background-color:var(--page-bg);border-color:#eff1f4;border-color:var(--border-color);border-radius:16px;border-style:solid;border-width:1px;padding:8px;position:absolute;top:68px;z-index:5}.interactive-seg-confirm-actions .action-buttons{align-items:center;display:flex;gap:8px;justify-content:center}@-webkit-keyframes pulse{to{box-shadow:0 0 0 14px rgba(21,215,121,0)}}@keyframes pulse{to{box-shadow:0 0 0 14px rgba(21,215,121,0)}}.interactive-seg-cursor{-webkit-animation:pulse 1.5s cubic-bezier(.66,0,0,1) infinite;animation:pulse 1.5s cubic-bezier(.66,0,0,1) infinite;background-color:rgba(21,215,121,.936);border-radius:50%;box-shadow:0 0 0 0 rgba(21,215,121,.936);color:rgba(234,255,240,.98);height:20px;pointer-events:none;position:absolute;width:20px}.file-select-label{border:2px dashed #eff1f4;border:2px dashed var(--border-color);border-radius:.5rem;cursor:pointer;display:grid;min-width:600px}@media screen and (max-width:767px){.file-select-label{min-width:300px}}.file-select-label .file-select-label-hover,.file-select-label:hover{background-color:#fc0;background-color:var(--yellow-accent);color:#000}.file-select-container{display:grid;height:100%;padding:4rem;width:100%}.file-select-container input{display:none}.file-select-message{font-family:WorkSans;text-align:center}.btn-primary{grid-column-gap:1rem;background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;color:#040404;color:var(--btn-text-color);-webkit-column-gap:1rem;column-gap:1rem;cursor:pointer;display:grid;font-family:WorkSans,sans-serif;grid-auto-flow:column;padding:.5rem;place-items:center;width:-webkit-max-content;width:max-content;z-index:1}.btn-primary:hover{background-color:#fc0;background-color:var(--btn-primary-hover-bg);color:#040404;color:var(--btn-text-hover-color)}.btn-primary 
svg{height:auto;width:20px}.btn-primary-disabled{background-color:#fff;background-color:var(--page-bg);opacity:.5;pointer-events:none}.btn-border{border-color:#646478;border-color:var(--btn-border-color);border-style:solid;border-width:1px}.modal-mask{-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);background-color:rgba(209,213,219,.4);background-color:var(--model-mask-bg);inset:0;position:fixed;z-index:9998}@media(prefers-reduced-motion:no-preference){.modal-mask{-webkit-animation:opacityReveal .15s cubic-bezier(.16,1,.3,1) forwards;animation:opacityReveal .15s cubic-bezier(.16,1,.3,1) forwards}}@-webkit-keyframes contentShow{0%{opacity:0;-webkit-transform:translate(-50%,-48%) scale(.96);transform:translate(-50%,-48%) scale(.96)}to{opacity:1;-webkit-transform:translate(-50%,-50%) scale(1);transform:translate(-50%,-50%) scale(1)}}@keyframes contentShow{0%{opacity:0;-webkit-transform:translate(-50%,-48%) scale(.96);transform:translate(-50%,-48%) scale(.96)}to{opacity:1;-webkit-transform:translate(-50%,-50%) scale(1);transform:translate(-50%,-50%) scale(1)}}.modal{background-color:#fff;background-color:var(--page-bg);border-radius:.95rem;display:flex;flex-direction:column;gap:16px;left:50%;padding:25px;place-self:center;position:fixed;top:50%;-webkit-transform:translate(-50%,-50%);transform:translate(-50%,-50%);z-index:9999}.modal:focus{outline:none}.modal .modal-header{align-items:center;display:grid;grid-template-columns:repeat(2,auto)}.modal .modal-header .btn-primary{justify-self:end}@media(prefers-reduced-motion:no-preference){.modal{-webkit-animation:contentShow .15s cubic-bezier(.16,1,.3,1) forwards;animation:contentShow .15s cubic-bezier(.16,1,.3,1) forwards}}.select-trigger{all:unset;align-items:center;background-color:#fff;background-color:var(--page-bg);border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.5rem;color:#040404;color:var(--options-text-color);display:inline-flex;gap:8px;height:32px;justify-content:space-between;padding:0 .8rem}.select-trigger svg{height:1rem;margin-top:.25rem;width:1rem}.select-trigger:hover{border-color:#fc0;border-color:var(--yellow-accent)}.select-trigger:disabled{border-color:#eff1f4;border-color:var(--border-color);color:#eff1f4;color:var(--border-color)}.select-content{background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;overflow:hidden}.select-viewport{border:1px solid #eff1f4;border:1px solid var(--border-color);border-radius:.5rem;padding:5px}.select-item{all:unset;align-items:center;background-color:#fff;background-color:var(--page-bg);border-radius:.5rem;color:#040404;color:var(--options-text-color);display:flex;padding:6px 6px 6px 25px;position:relative;-webkit-user-select:none;user-select:none}.select-item:focus{background-color:#fc0;background-color:var(--yellow-accent);color:#040404;color:var(--btn-text-hover-color)}.select-item-indicator{align-items:center;display:inline-flex;justify-content:center;left:0;padding-right:4px;position:absolute;width:25px}.switch-root{-webkit-tap-highlight-color:rgba(0,0,0,0);all:"unset";background-color:#dfe1e4;background-color:var(--switch-root-background-color);border:none;border-radius:9999px;height:25px;position:relative;transition:background-color 
.1s;width:42px}.switch-root:focus-visible{outline:none}.switch-root[data-state=checked]{background-color:#fc0;background-color:var(--yellow-accent)}.switch-thumb{background-color:#fff;background-color:var(--switch-thumb-color);border-radius:9999px;display:block;height:17px;-webkit-transform:translateX(4px);transform:translateX(4px);transition:-webkit-transform .1s;transition:transform .1s;transition:transform .1s,-webkit-transform .1s;width:17px;will-change:transform}.switch-thumb[data-state=checked]{background-color:#fff;background-color:var(--switch-thumb-checked-color);outline:1px solid hsla(240,9%,43%,.5);-webkit-transform:translateX(21px);transform:translateX(21px)}.number-input{all:unset;border-radius:.5rem;flex:1 0 auto;height:32px;outline:1px solid #eff1f4;outline:1px solid var(--border-color);padding:0 .8rem;text-align:right}.number-input:focus-visible{outline:1px solid #fc0;outline:1px solid var(--yellow-accent)}.number-input:disabled{color:#eff1f4;color:var(--border-color)}.toast-viewpoint{bottom:48px;display:flex;flex-direction:row;gap:10px;margin:0;max-width:100vw;padding:25px;position:fixed;right:1.5rem;z-index:999999}.toast-viewpoint:focus-visible{outline:none}.toast-root{align-items:center;background-color:#fff;background-color:var(--page-bg);border:1px solid hsla(240,9%,43%,.5);border:1px solid var(--border-color-light);border-radius:.6rem;display:flex;gap:12px;padding:15px}.toast-root[data-state=open]{-webkit-animation:slideIn .15s cubic-bezier(.16,1,.3,1);animation:slideIn .15s cubic-bezier(.16,1,.3,1)}.toast-root[data-state=close]{-webkit-animation:opacityReveal .1s ease-in forwards;animation:opacityReveal .1s ease-in forwards}.toast-root[data-state=cancel]{-webkit-animation:transform .1s ease-out;animation:transform .1s ease-out;-webkit-transform:translateX(0);transform:translateX(0)}.toast-root.error{border:1px solid #ef4444;border:1px solid var(--error-color)}.toast-root.success{border:1px solid #10b981;border:1px solid var(--success-color)}.error-icon{color:#ef4444;color:var(--error-color);height:24px;width:24px}.success-icon{color:#10b981;color:var(--success-color);height:24px;width:24px}.loading-icon{-webkit-animation-duration:1.5s;animation-duration:1.5s;-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite;-webkit-animation-name:spin;animation-name:spin;-webkit-animation-timing-function:linear;animation-timing-function:linear;-webkit-transform-origin:center center;transform-origin:center center}.loading-icon,.toast-desc,.toast-icon{align-items:center;display:flex}.toast-desc{color:#040404;color:var(--text-color);margin:0;min-width:240px}.tooltip-trigger{align-items:center;display:flex;justify-content:center}.tooltip-content{background-color:#fff;background-color:var(--tooltip-bg);border-radius:4px;box-shadow:0 10px 38px -10px rgba(14,18,22,.35),0 10px 20px -15px rgba(14,18,22,.2);color:#000;color:var(--tooltip-text-color);padding:10px 
15px}@media(prefers-reduced-motion:no-preference){.tooltip-content{-webkit-animation-duration:.4s;animation-duration:.4s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:cubic-bezier(.16,1,.3,1);animation-timing-function:cubic-bezier(.16,1,.3,1);will-change:transform,opacity}.tooltip-content[data-state=delayed-open][data-side=top]{-webkit-animation-name:slideDownAndFade;animation-name:slideDownAndFade}.tooltip-content[data-state=delayed-open][data-side=bottom]{-webkit-animation-name:slideUpAndFade;animation-name:slideUpAndFade}}.tooltip-arrow{fill:#fff;fill:var(--tooltip-bg)}*,:after,:before{box-sizing:border-box;margin:0;padding:0}body,html{font-family:WorkSans,sans-serif}
app/build/static/js/main.a2cdd7a2.js ADDED
The diff for this file is too large to render.
 
app/build/static/js/main.a2cdd7a2.js.LICENSE.txt ADDED
@@ -0,0 +1,60 @@
+ /*
+ object-assign
+ (c) Sindre Sorhus
+ @license MIT
+ */
+
+ /*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */
+
+ /**
+  * @license
+  * Lodash <https://lodash.com/>
+  * Copyright OpenJS Foundation and other contributors <https://openjsf.org/>
+  * Released under MIT license <https://lodash.com/license>
+  * Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
+  * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
+  */
+
+ /** @license React v0.20.2
+  * scheduler.production.min.js
+  *
+  * Copyright (c) Facebook, Inc. and its affiliates.
+  *
+  * This source code is licensed under the MIT license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ /** @license React v17.0.2
+  * react-dom.production.min.js
+  *
+  * Copyright (c) Facebook, Inc. and its affiliates.
+  *
+  * This source code is licensed under the MIT license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ /** @license React v17.0.2
+  * react-jsx-runtime.production.min.js
+  *
+  * Copyright (c) Facebook, Inc. and its affiliates.
+  *
+  * This source code is licensed under the MIT license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ /** @license React v17.0.2
+  * react.production.min.js
+  *
+  * Copyright (c) Facebook, Inc. and its affiliates.
+  *
+  * This source code is licensed under the MIT license found in the
+  * LICENSE file in the root directory of this source tree.
+  */
+
+ /**!
+  * FlexSearch.js v0.7.21 (Bundle)
+  * Copyright 2018-2021 Nextapps GmbH
+  * Author: Thomas Wilkerling
+  * Licence: Apache-2.0
+  * https://github.com/nextapps-de/flexsearch
+  */
app/build/static/js/main.ca662570.js ADDED
The diff for this file is too large to render.
 
app/build/static/js/main.ca662570.js.LICENSE.txt ADDED
@@ -0,0 +1,60 @@
(contents identical to app/build/static/js/main.a2cdd7a2.js.LICENSE.txt above)
app/build/static/js/main.ed69b879.js ADDED
The diff for this file is too large to render.
 
app/build/static/js/main.ed69b879.js.LICENSE.txt ADDED
@@ -0,0 +1,60 @@
(contents identical to app/build/static/js/main.a2cdd7a2.js.LICENSE.txt above)
app/build/static/media/WorkSans-Black.67c2c5a144333953880b.ttf ADDED
Binary file (192 kB).
 
app/build/static/media/WorkSans-Bold.2bea7a7f7d052c74da25.ttf ADDED
Binary file (193 kB).
 
app/build/static/media/WorkSans-Regular.bb287b894b27372d8ea7.ttf ADDED
Binary file (192 kB).
 
app/build/static/media/WorkSans-SemiBold.1e98db4eb705b586728e.ttf ADDED
Binary file (193 kB).
 
app/build/static/media/coffee-machine-lineal.ee32631219cc3986f861.gif ADDED
benchmark.py ADDED
@@ -0,0 +1,108 @@
+ # #!/usr/bin/env python3
+ #
+ # import argparse
+ # import os
+ # import time
+ # import numpy as np
+ # import nvidia_smi
+ # import psutil
+ # import torch
+ #
+ # from model_manager import ModelManager
+ # from schema import Config, HDStrategy, SDSampler
+ #
+ # try:
+ #     torch._C._jit_override_can_fuse_on_cpu(False)
+ #     torch._C._jit_override_can_fuse_on_gpu(False)
+ #     torch._C._jit_set_texpr_fuser_enabled(False)
+ #     torch._C._jit_set_nvfuser_enabled(False)
+ # except:
+ #     pass
+ #
+ # NUM_THREADS = str(4)
+ #
+ # os.environ["OMP_NUM_THREADS"] = NUM_THREADS
+ # os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
+ # os.environ["MKL_NUM_THREADS"] = NUM_THREADS
+ # os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
+ # os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
+ # if os.environ.get("CACHE_DIR"):
+ #     os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
+ #
+ #
+ # def run_model(model, size):
+ #     # RGB
+ #     image = np.random.randint(0, 256, (size[0], size[1], 3)).astype(np.uint8)
+ #     mask = np.random.randint(0, 255, size).astype(np.uint8)
+ #
+ #     config = Config(
+ #         ldm_steps=2,
+ #         hd_strategy=HDStrategy.ORIGINAL,
+ #         hd_strategy_crop_margin=128,
+ #         hd_strategy_crop_trigger_size=128,
+ #         hd_strategy_resize_limit=128,
+ #         prompt="a fox is sitting on a bench",
+ #         sd_steps=5,
+ #         sd_sampler=SDSampler.ddim
+ #     )
+ #     model(image, mask, config)
+ #
+ #
+ # def benchmark(model, times: int, empty_cache: bool):
+ #     sizes = [(512, 512)]
+ #
+ #     nvidia_smi.nvmlInit()
+ #     device_id = 0
+ #     handle = nvidia_smi.nvmlDeviceGetHandleByIndex(device_id)
+ #
+ #     def format(metrics):
+ #         return f"{np.mean(metrics):.2f} ± {np.std(metrics):.2f}"
+ #
+ #     process = psutil.Process(os.getpid())
+ #     # report GPU memory and RAM usage metrics for each size
+ #     for size in sizes:
+ #         torch.cuda.empty_cache()
+ #         time_metrics = []
+ #         cpu_metrics = []
+ #         memory_metrics = []
+ #         gpu_memory_metrics = []
+ #         for _ in range(times):
+ #             start = time.time()
+ #             run_model(model, size)
+ #             torch.cuda.synchronize()
+ #
+ #             # cpu_metrics.append(process.cpu_percent())
+ #             time_metrics.append((time.time() - start) * 1000)
+ #             memory_metrics.append(process.memory_info().rss / 1024 / 1024)
+ #             gpu_memory_metrics.append(nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024)
+ #
+ #         print(f"size: {size}".center(80, "-"))
+ #         # print(f"cpu: {format(cpu_metrics)}")
+ #         print(f"latency: {format(time_metrics)}ms")
+ #         print(f"memory: {format(memory_metrics)} MB")
+ #         print(f"gpu memory: {format(gpu_memory_metrics)} MB")
+ #
+ #     nvidia_smi.nvmlShutdown()
+ #
+ #
+ # def get_args_parser():
+ #     parser = argparse.ArgumentParser()
+ #     parser.add_argument("--name")
+ #     parser.add_argument("--device", default="cuda", type=str)
+ #     parser.add_argument("--times", default=10, type=int)
+ #     parser.add_argument("--empty-cache", action="store_true")
+ #     return parser.parse_args()
+ #
+ #
+ # if __name__ == "__main__":
+ #     args = get_args_parser()
+ #     device = torch.device(args.device)
+ #     model = ModelManager(
+ #         name=args.name,
+ #         device=device,
+ #         sd_run_local=True,
+ #         disable_nsfw=True,
+ #         sd_cpu_textencoder=True,
+ #         hf_access_token="123"
+ #     )
+ #     benchmark(model, args.times, args.empty_cache)
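The commented-out benchmark reports each metric as mean ± std over `times` runs. A minimal runnable sketch of the same timing pattern (not part of the commit; the workload is a stand-in):

    import time
    import numpy as np

    def bench(fn, times=10):
        # collect per-run latency in ms, report mean ± std like benchmark.py's format()
        latencies = []
        for _ in range(times):
            start = time.time()
            fn()
            latencies.append((time.time() - start) * 1000)
        print(f"latency: {np.mean(latencies):.2f} ± {np.std(latencies):.2f}ms")

    bench(lambda: np.fft.fft2(np.random.rand(512, 512)))  # stand-in workload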
const.py ADDED
@@ -0,0 +1,80 @@
+ import os
+
+ MPS_SUPPORT_MODELS = [
+     "instruct_pix2pix",
+     "sd1.5",
+     "anything4",
+     "realisticVision1.4",
+     "sd2",
+     "paint_by_example"
+ ]
+
+ DEFAULT_MODEL = "lama"
+ AVAILABLE_MODELS = [
+     "lama",
+     "ldm",
+     "zits",
+     "mat",
+     "fcf",
+     "sd1.5",
+     "anything4",
+     "realisticVision1.4",
+     "cv2",
+     "manga",
+     "sd2",
+     "paint_by_example",
+     "instruct_pix2pix",
+ ]
+
+ AVAILABLE_DEVICES = ["cuda", "cpu", "mps"]
+ DEFAULT_DEVICE = 'cuda'
+
+ NO_HALF_HELP = """
+ Use the full-precision model.
+ If your generated result is always black or green, use this argument. (sd/paint_by_example)
+ """
+
+ CPU_OFFLOAD_HELP = """
+ Offload all models to CPU, significantly reducing vRAM usage. (sd/paint_by_example)
+ """
+
+ DISABLE_NSFW_HELP = """
+ Disable the NSFW checker. (sd/paint_by_example)
+ """
+
+ SD_CPU_TEXTENCODER_HELP = """
+ Run the Stable Diffusion text encoder on CPU to save GPU memory.
+ """
+
+ LOCAL_FILES_ONLY_HELP = """
+ Use local files only; do not connect to the Hugging Face server. (sd/paint_by_example)
+ """
+
+ ENABLE_XFORMERS_HELP = """
+ Enable xFormers optimizations. Requires the xformers package to be installed. See: https://github.com/facebookresearch/xformers (sd/paint_by_example)
+ """
+
+ DEFAULT_MODEL_DIR = os.getenv(
+     "XDG_CACHE_HOME",
+     os.path.join(os.path.expanduser("~"), ".cache")
+ )
+ MODEL_DIR_HELP = """
+ Model download directory (set via the XDG_CACHE_HOME environment variable); by default models are downloaded to ~/.cache
+ """
+
+ OUTPUT_DIR_HELP = """
+ Result images will be saved to the output directory automatically without confirmation.
+ """
+
+ INPUT_HELP = """
+ If input is an image, it will be loaded by default.
+ If input is a directory, you can browse and select an image in the file manager.
+ """
+
+ GUI_HELP = """
+ Launch Lama Cleaner as a desktop app
+ """
+
+ NO_GUI_AUTO_CLOSE_HELP = """
+ Prevent the backend from closing automatically after the GUI window is closed.
+ """
ext/__init__.py ADDED
@@ -0,0 +1 @@
+ from .image_watermark_handler import ImageWatermarkHandler
ext/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (297 Bytes).
 
ext/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (284 Bytes).
 
ext/__pycache__/image_watermark_handler.cpython-38.pyc ADDED
Binary file (2.58 kB).
 
ext/__pycache__/image_watermark_handler.cpython-39.pyc ADDED
Binary file (2.56 kB).
 
ext/image_watermark_handler.py ADDED
@@ -0,0 +1,93 @@
+ import io
+ import os
+ import shutil
+
+ import cv2 as cv
+ import numpy as np
+ import requests
+ from PIL import Image
+
+
+ class ImageWatermarkHandler:
+
+     def __init__(self):
+         pass
+
+     def index_watermark(self, img_path, block_size, step_len, threshold, temp_output_path):
+         """
+         Read the image, then walk over it block by block and compute each block's
+         standard deviation. A block whose standard deviation is below the threshold
+         is treated as nearly uniform (a likely watermark area). Optionally save each
+         such block to disk, then return the list of block positions.
+         :param img_path: path to the image
+         :param block_size: block size
+         :param step_len: step length of the scan
+         :param threshold: standard-deviation threshold
+         :param temp_output_path: temporary directory for saving sub-images
+         :return: list of {'w': x, 'h': y} block positions
+         """
+         # read the image as a single-channel grayscale image
+         img = cv.imread(img_path, cv.IMREAD_GRAYSCALE)
+
+         # get width and height
+         height, width = img.shape
+         print("cur gray img width: %s, height: %s, block_size: %s" % (width, height, block_size))
+         block_num = int(height * width // block_size)
+         print("total split block num: %s" % (block_num))
+
+         # remove the previous result dir
+         # (no directory is needed when the block images are not saved)
+         # if (os.path.exists(temp_output_path)):
+         #     shutil.rmtree(temp_output_path)
+         # os.mkdir(temp_output_path)
+
+         # collect block positions in memory (and optionally in local files)
+         blocks = []
+         # iterate over the blocks
+         for i in range(0, height, step_len):
+             for j in range(0, width, step_len):
+                 # get the pixel block
+                 block = img[i:i + block_size, j:j + block_size]
+                 # print("cur idx [%s,%s], block: %s" % (i, j, block))
+
+                 # calculate the standard deviation
+                 std_dev = np.std(block)
+
+                 # if the standard deviation is below the threshold, the block is nearly uniform
+                 if std_dev < threshold and std_dev > 0:
+                     # record the position in memory
+                     blocks.append({'w': j, 'h': i})
+                     # optionally save the block to a local file
+                     f = temp_output_path + "{}-{}.png".format(j, i)
+                     print("save split img =====> w: {}, h: {}, cur std_dev: {}, cur threshold: {}".format(j, i, std_dev, threshold))
+                     # saving the block image is optional
+                     # cv.imwrite(f, block)
+         return blocks
+
+     def get_mask(self, img_path, blocks, block_size, mask_img_path):
+         """
+         Build the mask: black everywhere, white over each detected block.
+         :param img_path: path to the source image (used for its size)
+         :param blocks: block positions returned by index_watermark
+         :param block_size: block size
+         :param mask_img_path: where to save the mask image
+         :return: the mask image
+         """
+         img = cv.imread(img_path, cv.IMREAD_COLOR)
+         # black background
+         img[:] = 0
+         for item in blocks:
+             w = int(item.get("w"))
+             h = int(item.get("h"))
+             x1, y1 = w, h  # top-left corner
+             x2, y2 = w + block_size, h + block_size  # bottom-right corner
+             # white block
+             img[y1:y2, x1:x2] = 255, 255, 255
+             # print(img[y1, x1])
+         # save
+         cv.imwrite(mask_img_path, img)
+         return img
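A minimal usage sketch for the handler (not part of the commit; paths and parameter values are hypothetical):

    from ext import ImageWatermarkHandler

    handler = ImageWatermarkHandler()
    # flag near-uniform 20x20 blocks, scanning every 10 px, std-dev threshold 8.0
    blocks = handler.index_watermark("img.png", block_size=20, step_len=10,
                                     threshold=8.0, temp_output_path="tmp/")
    # paint those blocks white on a black canvas to get the inpainting mask
    handler.get_mask("img.png", blocks, 20, "img_mask.png")

The resulting img_mask.png can then be posted to the /inpaint endpoint as shown in ext/test.py below.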
ext/request_info.txt ADDED
@@ -0,0 +1,44 @@
+ Parameters
+
+ image: (binary) source image file stream
+ mask: (binary) mask image byte stream
+
+ ldm model settings:
+ ldmSteps: 25
+ ldmSampler: plms
+
+ zitsWireframe: true  (wireframe)
+
+ hdStrategy: Crop ---> supported strategies: Crop, Origin, Resize. Crop cuts the masked area out of the original image and inpaints only that crop; GPU-friendly
+ hdStrategyCropMargin: 196
+ hdStrategyCropTrigerSize: 800 ---> varies
+ hdStrategyResizeLimit: 2048
+ prompt:
+ negativePrompt:
+
+ croperX: -206
+ croperY: -222
+ croperHeight: 512
+ croperWidth: 512
+ useCroper: false
+
+ sdMaskBlur: 5
+ sdStrength: 0.75
+ sdSteps: 50
+ sdGuidanceScale: 7.5
+ sdSampler: pndm
+ sdSeed: -1
+ sdMatchHistograms: false
+ sdScale: 1
+ cv2Radius: 5
+ cv2Flag: INPAINT_NS
+ paintByExampleSteps: 50
+ paintByExampleGuidanceScale: 7.5
+ paintByExampleSeed: -1 ---> varies
+ paintByExampleMaskBlur: 5
+ paintByExampleMatchHistograms: false
+ p2pSteps: 50
+ p2pImageGuidanceScale: 1.5
+ p2pGuidanceScale: 7.5
+ sizeLimit: 99
ext/test.py ADDED
@@ -0,0 +1,341 @@
+ import io
+ 
+ import cv2
+ import numpy as np
+ import requests
+ from PIL import Image
+ 
+ """
+ 1. Test the lama-cleaner inpaint API
+ """
+ def test_inpaint_api():
+     """
+     Parameters are image and mask
+     :return:
+     """
+ 
+     # Load the original image and convert it to grayscale
+ 
+     img_path = '/resources/jeyoo-img/img.png'
+     mask_img_path = '/resources/jeyoo-img/img_mask.png'
+     clean_img_path = "/resources/jeyoo-img/img_clean.png"
+ 
+     img = cv2.imread(img_path)
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     cv2.imshow('gray', gray)
+ 
+     cv2.imwrite(mask_img_path, gray)
+ 
+     r = requests.post('http://127.0.0.1:7860/inpaint',
+                       files={
+                           'image': open(img_path, 'rb'),
+                           'mask': open(mask_img_path, 'rb')},
+                       data={
+                           'ldmSteps': 25,
+                           'ldmSampler': "plms",
+                           'zitsWireframe': True,
+                           'hdStrategy': "Crop",
+                           'hdStrategyCropMargin': 196,
+                           'hdStrategyCropTrigerSize': 800,
+                           'hdStrategyResizeLimit': 2048,
+                           'prompt': "",
+                           'negativePrompt': "",
+                           'croperX': 58,
+                           'croperY': -26,
+                           'croperHeight': 512,
+                           'croperWidth': 512,
+                           'useCroper': False,
+                           'sdMaskBlur': 5,
+                           'sdStrength': 0.75,
+                           'sdSteps': 50,
+                           'sdGuidanceScale': 7.5,
+                           'sdSampler': "pndm",
+                           'sdSeed': -1,
+                           'sdMatchHistograms': False,
+                           'sdScale': 1,
+                           'cv2Radius': 5,
+                           'cv2Flag': "INPAINT_NS",
+                           'paintByExampleSteps': 50,
+                           'paintByExampleGuidanceScale': 7.5,
+                           'paintByExampleSeed': -1,
+                           'paintByExampleMaskBlur': 5,
+                           'paintByExampleMatchHistograms': False,
+                           'p2pSteps': 50,
+                           'p2pImageGuidanceScale': 1.5,
+                           'p2pGuidanceScale': 7.5,
+                           'sizeLimit': 628
+                       },
+                       headers={'x-api-key': 'xxxx'}
+                       )
+     if r.ok:
+         # r.content contains the bytes of the returned image
+         print(r)
+         image_data = r.content
+ 
+         # Turn the image bytes into an image object
+         image = Image.open(io.BytesIO(image_data))
+         # Save the image object to a local file
+         image.save(clean_img_path)
+     else:
+         r.raise_for_status()
+ 
+ 
+ """
+ 2. Test getting the mask image from an image based on the watermark
+ """
+ def test_get_mask_by_gradient():
+     """
+     In this example we first read an image and convert it to grayscale.
+     Then we compute the image gradient with the Sobel operator and take the mean pixel change.
+     Next we set pixels whose change is below half of that mean to 0, which gives a binary mask.
+     Finally we find the contours in the mask with findContours, get each contour's rectangle
+     with boundingRect, draw the rectangles on the original image and show the result.
+     """
+     # Read the image
+     img = cv2.imread('../lama_cleaner_source_code/resources/jeyoo-img/image31.png')
+ 
+     # Convert to grayscale
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ 
+     # Compute the gradient
+ 
+     # src: the gray image
+     # ddepth: output image depth; if src.depth() = CV_32F, use ddepth = -1/CV_32F/CV_64F
+     # dx, dy: derivative order in the x and y directions
+     grad_x = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
+     grad_y = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
+     # print("grad_x:{},grad_y:{}".format(grad_x, grad_y))
+ 
+     # grad_x and grad_y are matrices; the scalar arguments are their weights
+     grad = cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)
+ 
+     # Find the regions with small pixel change
+ 
+     # Take the absolute value of every element
+     grad_abs = np.absolute(grad)
+     # Mean of the gradient magnitude
+     grad_mean = np.mean(grad_abs)
+     # 0.7 is the maximum useful factor here
+     grad_threshold = grad_mean * 0.5
+     grad_mask = grad_abs < grad_threshold
+ 
+     print("grad_mean:{},grad_threshold:{}".format(grad_mean, grad_threshold))  # grad_mean:93.4161148071289,grad_threshold:46.70805740356445
+ 
+     # Get the bounding rectangles of the low-change regions
+     contours, hierarchy = cv2.findContours(grad_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     # print(contours)
+     rects = [cv2.boundingRect(cnt) for cnt in contours]
+ 
+     # Draw the rectangles on the original image
+     for rect in rects:
+         cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255), 2)
+ 
+     # Show the result
+     cv2.imshow('image', img)
+     cv2.waitKey(0)
+     cv2.destroyAllWindows()
+     cv2.imwrite('../lama_cleaner_source_code/resources/jeyoo-img/out/imgage31.png', img)
+ 
+ 
+ def test_get_mask_by_watermark():
+     """
+     Get the mask image from an image based on a watermark image.
+ 
+     To get a mask of the watermark position, you can use template matching in OpenCV.
+     Template matching finds a given template inside an image, so it can be used to detect
+     the watermark position. The steps are:
+     Load the original image and the watermark image.
+     Convert both images to grayscale.
+     Find the watermark position in the original image with cv2.matchTemplate.
+     Create the mask image from the match result: set the matched positions to white
+     and everything else to black.
+     :return:
+     """
+ 
+     # Load the original image and the watermark image
+     img = cv2.imread('original_image.jpg')
+     watermark = cv2.imread('watermark_image.jpg')
+ 
+     # Convert both to grayscale (matchTemplate needs the image and the template to have the same number of channels)
+     img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     watermark_gray = cv2.cvtColor(watermark, cv2.COLOR_BGR2GRAY)
+ 
+     # Find the watermark position in the original image with template matching
+     result = cv2.matchTemplate(img_gray, watermark_gray, cv2.TM_CCOEFF_NORMED)
+ 
+     # Create the mask image from the match result
+     threshold = 0.8
+     mask = np.zeros_like(result)
+     mask[result >= threshold] = 255
+ 
+     # Show the mask image
+     cv2.imshow('Mask', mask)
+ 
+     # Wait for a key press
+     cv2.waitKey(0)
+ 
+     # Close the windows
+     cv2.destroyAllWindows()
+ 
+ 
+ def test_get_mask_by_watermark2():
+ 
+     """
+     In the code below we run Canny edge detection on the grayscale image and detect
+     contours on the edge image with cv2.findContours.
+     Then we run a shape analysis on each contour and build the mask image from the
+     contours that pass the filter.
+     When filtering, cv2.isContourConvex is used to drop non-convex shapes. You can tune
+     the shape-analysis parameters as needed to get better results.
+     :return:
+     """
+     # Load the original image and convert it to grayscale
+     img = cv2.imread('../lama_cleaner_source_code/resources/jeyoo-img/img.png')
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ 
+     cv2.imshow('gray', gray)
+     cv2.waitKey(0)
+ 
+     # Edge detection on the grayscale image
+     edges = cv2.Canny(gray, 50, 150)
+     cv2.imshow('edges', edges)
+     cv2.waitKey(0)
+ 
+     # Contour detection on the edge image
+     contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+ 
+     # Shape analysis on every contour; keep the ones that may be a watermark
+     watermark_contours = []
+     for contour in contours:
+         perimeter = cv2.arcLength(contour, True)
+         approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
+         print(len(approx))
+         if len(approx) == 6 and cv2.isContourConvex(approx):
+             watermark_contours.append(approx)
+ 
+     # Build the mask image from the kept contours
+     mask = np.zeros_like(gray)
+     for contour in watermark_contours:
+         cv2.drawContours(mask, [contour], 0, 255, -1)
+ 
+     # Show the mask image
+     cv2.imshow('Mask', mask)
+ 
+     # Wait for a key press
+     cv2.waitKey(0)
+ 
+     # Close the windows
+     cv2.destroyAllWindows()
+ 
+ 
+ def test_get_mask_img_watermark3():
+     """
+     To generate a mask image for the watermark region, you can use a rectangle mask in OpenCV:
+     Create a black image of the same size as the original image as the mask image.
+     Draw a rectangle covering the watermark region on the mask with cv2.rectangle().
+     Convert the mask to grayscale and binarize it with cv2.threshold(), so that the
+     watermark region becomes white and everything else black.
+     :return:
+     """
+     img = cv2.imread('../lama_cleaner_source_code/resources/jeyoo-img/img.png')
+ 
+     # Create the mask image
+     mask = np.zeros_like(img)
+ 
+     # Define the watermark region
+     x, y, w, h = 3, 18, 29, 29
+ 
+     # Draw the rectangle on the mask image
+     cv2.rectangle(mask, (x, y), (x + w, y + h), (255, 255, 255), -1)
+ 
+     # Convert the mask image to grayscale
+     mask_gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+ 
+     # Binarize the mask image
+     _, mask_binary = cv2.threshold(mask_gray, 1, 255, cv2.THRESH_BINARY)
+ 
+     # Show the mask image
+     cv2.imshow('Mask', mask_binary)
+     cv2.waitKey(0)
+     cv2.destroyAllWindows()
+ 
+ 
+ """
+ 3. Test getting the mask image from an image by color range
+ """
+ 
+ def test_get_mask_by_inrange():
+ 
+     """
+     Approach 1
+     :return:
+     """
+     img_path = '../lama_cleaner_docker/resources/jeyoo2-shuiyin.png'
+     img = cv2.imread(img_path)
+     # Convert to grayscale
+     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+     # Build the mask: select the pixels whose value lies in this range
+     mask = cv2.inRange(gray, 240, 255)
+     cv2.imshow('mask1', mask)
+     cv2.waitKey(0)
+ 
+     """
+     Approach 2
+     """
+     # Read the image
+     img = cv2.imread(img_path)
+     cv2.imshow("img", img)
+     cv2.waitKey(0)
+     # Convert the image to the HSV color space
+     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+     cv2.imshow("hsv", hsv)
+     cv2.waitKey(0)
+ 
+     means, dev = cv2.meanStdDev(img)
+     print("means:{}".format(means))
+     # means: [[227.75111119]
+     #  [228.73636804]
+     #  [225.89541678]]
+ 
+     # Define the color range
+     lower_color = np.array([100, 100, 100])
+     upper_color = np.array([255, 255, 255])
+ 
+     # Create the mask
+     mask2 = cv2.inRange(hsv, lower_color, upper_color)
+ 
+     cv2.imshow("mask2", mask2)
+     cv2.waitKey(0)
+ 
+     # Extract the selected region (use mask2 from approach 2, not the earlier mask)
+     result = cv2.bitwise_and(img, img, mask=mask2)
+ 
+     # Show the result
+     cv2.imshow('image', result)
+     cv2.waitKey(0)
+     cv2.destroyAllWindows()
+ 
+ 
+ """
+ Test OpenCV's inpaint restoration method
+ """
+ def test_repire_old_img_by_cv():
+     img = cv2.imread('../lama_cleaner_source_code/resources/jeyoo-shuiyin.png')
+     mask = cv2.imread('../lama_cleaner_source_code/resources/jeyoo2-shuiyin_mask.jpg', cv2.IMREAD_GRAYSCALE)
+     dst = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
+     cv2.imshow('dst', dst)
+     cv2.waitKey(0)
+     cv2.destroyAllWindows()
+ 
+ 
file_manager/__init__.py ADDED
@@ -0,0 +1 @@
+ from .file_manager import FileManager
file_manager/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (285 Bytes). View file
 
file_manager/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (272 Bytes). View file
 
file_manager/__pycache__/file_manager.cpython-38.pyc ADDED
Binary file (7.32 kB). View file
 
file_manager/__pycache__/file_manager.cpython-39.pyc ADDED
Binary file (7.29 kB). View file
 
file_manager/__pycache__/storage_backends.cpython-38.pyc ADDED
Binary file (2.07 kB). View file
 
file_manager/__pycache__/storage_backends.cpython-39.pyc ADDED
Binary file (2.09 kB). View file
 
file_manager/__pycache__/utils.cpython-38.pyc ADDED
Binary file (1.72 kB). View file
 
file_manager/__pycache__/utils.cpython-39.pyc ADDED
Binary file (1.7 kB). View file
 
file_manager/file_manager.py ADDED
@@ -0,0 +1,264 @@
+ # Copy from https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/thumbnail.py
+ import os
+ from datetime import datetime
+ 
+ import cv2
+ import time
+ from io import BytesIO
+ from pathlib import Path
+ import numpy as np
+ from watchdog.events import FileSystemEventHandler
+ from watchdog.observers import Observer
+ 
+ from PIL import Image, ImageOps, PngImagePlugin
+ from loguru import logger
+ 
+ LARGE_ENOUGH_NUMBER = 100
+ PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
+ from .storage_backends import FilesystemStorageBackend
+ from .utils import aspect_to_string, generate_filename, glob_img
+ 
+ 
+ class FileManager(FileSystemEventHandler):
+     def __init__(self, app=None):
+         self.app = app
+         self._default_root_directory = "media"
+         self._default_thumbnail_directory = "media"
+         self._default_root_url = "/"
+         self._default_thumbnail_root_url = "/"
+         self._default_format = "JPEG"
+         self.output_dir: Path = None
+ 
+         if app is not None:
+             self.init_app(app)
+ 
+         self.image_dir_filenames = []
+         self.output_dir_filenames = []
+ 
+         self.image_dir_observer = None
+         self.output_dir_observer = None
+ 
+         self.modified_time = {
+             "image": datetime.utcnow(),
+             "output": datetime.utcnow(),
+         }
+ 
+     def start(self):
+         self.image_dir_filenames = self._media_names(self.root_directory)
+         self.output_dir_filenames = self._media_names(self.output_dir)
+ 
+         logger.info(f"Start watching image directory: {self.root_directory}")
+         self.image_dir_observer = Observer()
+         self.image_dir_observer.schedule(self, self.root_directory, recursive=False)
+         self.image_dir_observer.start()
+ 
+         logger.info(f"Start watching output directory: {self.output_dir}")
+         self.output_dir_observer = Observer()
+         self.output_dir_observer.schedule(self, self.output_dir, recursive=False)
+         self.output_dir_observer.start()
+ 
+     def on_modified(self, event):
+         if not os.path.isdir(event.src_path):
+             return
+         if event.src_path == str(self.root_directory):
+             logger.info(f"Image directory {event.src_path} modified")
+             self.image_dir_filenames = self._media_names(self.root_directory)
+             self.modified_time["image"] = datetime.utcnow()
+         elif event.src_path == str(self.output_dir):
+             logger.info(f"Output directory {event.src_path} modified")
+             self.output_dir_filenames = self._media_names(self.output_dir)
+             self.modified_time["output"] = datetime.utcnow()
+ 
+     def init_app(self, app):
+         if self.app is None:
+             self.app = app
+         app.thumbnail_instance = self
+ 
+         if not hasattr(app, "extensions"):
+             app.extensions = {}
+ 
+         if "thumbnail" in app.extensions:
+             raise RuntimeError("Flask-thumbnail extension already initialized")
+ 
+         app.extensions["thumbnail"] = self
+ 
+         app.config.setdefault("THUMBNAIL_MEDIA_ROOT", self._default_root_directory)
+         app.config.setdefault(
+             "THUMBNAIL_MEDIA_THUMBNAIL_ROOT", self._default_thumbnail_directory
+         )
+         app.config.setdefault("THUMBNAIL_MEDIA_URL", self._default_root_url)
+         app.config.setdefault(
+             "THUMBNAIL_MEDIA_THUMBNAIL_URL", self._default_thumbnail_root_url
+         )
+         app.config.setdefault("THUMBNAIL_DEFAULT_FORMAT", self._default_format)
+ 
+     @property
+     def root_directory(self):
+         path = self.app.config["THUMBNAIL_MEDIA_ROOT"]
+ 
+         if os.path.isabs(path):
+             return path
+         else:
+             return os.path.join(self.app.root_path, path)
+ 
+     @property
+     def thumbnail_directory(self):
+         path = self.app.config["THUMBNAIL_MEDIA_THUMBNAIL_ROOT"]
+ 
+         if os.path.isabs(path):
+             return path
+         else:
+             return os.path.join(self.app.root_path, path)
+ 
+     @property
+     def root_url(self):
+         return self.app.config["THUMBNAIL_MEDIA_URL"]
+ 
+     @property
+     def media_names(self):
+         # return self.image_dir_filenames
+         return self._media_names(self.root_directory)
+ 
+     @property
+     def output_media_names(self):
+         return self._media_names(self.output_dir)
+         # return self.output_dir_filenames
+ 
+     @staticmethod
+     def _media_names(directory: Path):
+         names = sorted([it.name for it in glob_img(directory)])
+         res = []
+         for name in names:
+             path = os.path.join(directory, name)
+             img = Image.open(path)
+             res.append(
+                 {
+                     "name": name,
+                     "height": img.height,
+                     "width": img.width,
+                     "ctime": os.path.getctime(path),
+                 }
+             )
+         return res
+ 
+     @property
+     def thumbnail_url(self):
+         return self.app.config["THUMBNAIL_MEDIA_THUMBNAIL_URL"]
+ 
+     def get_thumbnail(
+         self, directory: Path, original_filename: str, width, height, **options
+     ):
+         storage = FilesystemStorageBackend(self.app)
+         crop = options.get("crop", "fit")
+         background = options.get("background")
+         quality = options.get("quality", 90)
+ 
+         original_path, original_filename = os.path.split(original_filename)
+         original_filepath = os.path.join(directory, original_path, original_filename)
+         image = Image.open(BytesIO(storage.read(original_filepath)))
+ 
+         # keep-ratio resize
+         if width is not None:
+             height = int(image.height * width / image.width)
+         else:
+             width = int(image.width * height / image.height)
+ 
+         thumbnail_size = (width, height)
+ 
+         thumbnail_filename = generate_filename(
+             original_filename,
+             aspect_to_string(thumbnail_size),
+             crop,
+             background,
+             quality,
+         )
+ 
+         thumbnail_filepath = os.path.join(
+             self.thumbnail_directory, original_path, thumbnail_filename
+         )
+         thumbnail_url = os.path.join(
+             self.thumbnail_url, original_path, thumbnail_filename
+         )
+ 
+         if storage.exists(thumbnail_filepath):
+             return thumbnail_url, (width, height)
+ 
+         try:
+             image.load()
+         except (IOError, OSError):
+             self.app.logger.warning("Thumbnail could not load image: %s", original_filepath)
+             return thumbnail_url, (width, height)
+ 
+         # get the original image format
+         options["format"] = options.get("format", image.format)
+ 
+         image = self._create_thumbnail(
+             image, thumbnail_size, crop, background=background
+         )
+ 
+         raw_data = self.get_raw_data(image, **options)
+         storage.save(thumbnail_filepath, raw_data)
+ 
+         return thumbnail_url, (width, height)
+ 
+     def get_raw_data(self, image, **options):
+         data = {
+             "format": self._get_format(image, **options),
+             "quality": options.get("quality", 90),
+         }
+ 
+         _file = BytesIO()
+         image.save(_file, **data)
+         return _file.getvalue()
+ 
+     @staticmethod
+     def colormode(image, colormode="RGB"):
+         if colormode == "RGB" or colormode == "RGBA":
+             if image.mode == "RGBA":
+                 return image
+             if image.mode == "LA":
+                 return image.convert("RGBA")
+             return image.convert(colormode)
+ 
+         if colormode == "GRAY":
+             return image.convert("L")
+ 
+         return image.convert(colormode)
+ 
+     @staticmethod
+     def background(original_image, color=0xFF):
+         size = (max(original_image.size),) * 2
+         image = Image.new("L", size, color)
+         image.paste(
+             original_image,
+             # paste offsets must be integers, hence the floor division
+             tuple(map(lambda x: (x[0] - x[1]) // 2, zip(size, original_image.size))),
+         )
+ 
+         return image
+ 
+     def _get_format(self, image, **options):
+         if options.get("format"):
+             return options.get("format")
+         if image.format:
+             return image.format
+ 
+         return self.app.config["THUMBNAIL_DEFAULT_FORMAT"]
+ 
+     def _create_thumbnail(self, image, size, crop="fit", background=None):
+         try:
+             resample = Image.Resampling.LANCZOS
+         except AttributeError:  # pylint: disable=raise-missing-from
+             resample = Image.ANTIALIAS
+ 
+         if crop == "fit":
+             image = ImageOps.fit(image, size, resample)
+         else:
+             image = image.copy()
+             image.thumbnail(size, resample=resample)
+ 
+         if background is not None:
+             image = self.background(image)
+ 
+         image = self.colormode(image)
+ 
+         return image
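A sketch of how get_thumbnail might be wired into a Flask app; the app setup, media paths, and file names are illustrative assumptions:

    from pathlib import Path
    from flask import Flask
    from file_manager import FileManager

    app = Flask(__name__)
    app.config["THUMBNAIL_MEDIA_ROOT"] = "media"            # source images
    app.config["THUMBNAIL_MEDIA_THUMBNAIL_ROOT"] = "media"  # generated thumbnails

    fm = FileManager(app)
    # resize photo.png to 256 px wide, keeping the aspect ratio (height=None triggers keep-ratio)
    url, (w, h) = fm.get_thumbnail(Path("media"), "photo.png", 256, None)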
file_manager/storage_backends.py ADDED
@@ -0,0 +1,46 @@
+ # Copy from https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/storage_backends.py
+ import errno
+ import os
+ from abc import ABC, abstractmethod
+ 
+ 
+ class BaseStorageBackend(ABC):
+     def __init__(self, app=None):
+         self.app = app
+ 
+     @abstractmethod
+     def read(self, filepath, mode="rb", **kwargs):
+         raise NotImplementedError
+ 
+     @abstractmethod
+     def exists(self, filepath):
+         raise NotImplementedError
+ 
+     @abstractmethod
+     def save(self, filepath, data):
+         raise NotImplementedError
+ 
+ 
+ class FilesystemStorageBackend(BaseStorageBackend):
+     def read(self, filepath, mode="rb", **kwargs):
+         with open(filepath, mode) as f:  # pylint: disable=unspecified-encoding
+             return f.read()
+ 
+     def exists(self, filepath):
+         return os.path.exists(filepath)
+ 
+     def save(self, filepath, data):
+         directory = os.path.dirname(filepath)
+ 
+         if not os.path.exists(directory):
+             try:
+                 os.makedirs(directory)
+             except OSError as e:
+                 if e.errno != errno.EEXIST:
+                     raise
+ 
+         if not os.path.isdir(directory):
+             raise IOError("{} is not a directory".format(directory))
+ 
+         with open(filepath, "wb") as f:
+             f.write(data)
file_manager/utils.py ADDED
@@ -0,0 +1,67 @@
+ # Copy from: https://github.com/silentsokolov/flask-thumbnails/blob/master/flask_thumbnails/utils.py
+ import importlib
+ import os
+ from pathlib import Path
+ 
+ from typing import Union
+ 
+ 
+ def generate_filename(original_filename, *options):
+     name, ext = os.path.splitext(original_filename)
+     for v in options:
+         if v:
+             name += "_%s" % v
+     name += ext
+ 
+     return name
+ 
+ 
+ def parse_size(size):
+     if isinstance(size, int):
+         # If the size parameter is a single number, assume square aspect.
+         return [size, size]
+ 
+     if isinstance(size, (tuple, list)):
+         if len(size) == 1:
+             # If a single-value tuple/list is provided, expand it to two elements
+             return size + type(size)(size)
+         return size
+ 
+     try:
+         thumbnail_size = [int(x) for x in size.lower().split("x", 1)]
+     except ValueError:
+         raise ValueError(  # pylint: disable=raise-missing-from
+             "Bad thumbnail size format. Valid format is INTxINT."
+         )
+ 
+     if len(thumbnail_size) == 1:
+         # If the size parameter only contains a single integer, assume square aspect.
+         thumbnail_size.append(thumbnail_size[0])
+ 
+     return thumbnail_size
+ 
+ 
+ def aspect_to_string(size):
+     if isinstance(size, str):
+         return size
+ 
+     return "x".join(map(str, size))
+ 
+ 
+ IMG_SUFFIX = {'.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG'}
+ 
+ 
+ def glob_img(p: Union[Path, str], recursive: bool = False):
+     p = Path(p)
+     if p.is_file() and p.suffix in IMG_SUFFIX:
+         yield p
+     else:
+         if recursive:
+             files = Path(p).glob("**/*.*")
+         else:
+             files = Path(p).glob("*.*")
+ 
+         for it in files:
+             if it.suffix not in IMG_SUFFIX:
+                 continue
+             yield it
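For reference, the expected behavior of the helpers above, derived directly from the code (shown as a sketch):

    from file_manager.utils import parse_size, aspect_to_string, generate_filename

    parse_size(200)               # -> [200, 200]
    parse_size("200x300")         # -> [200, 300]
    aspect_to_string((200, 300))  # -> "200x300"
    generate_filename("photo.png", "200x300", "fit", None, 90)  # -> 'photo_200x300_fit_90.png' (falsy options are skipped)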
helper.py ADDED
@@ -0,0 +1,284 @@
+ import io
+ import os
+ import sys
+ from typing import List, Optional
+ 
+ from urllib.parse import urlparse
+ import cv2
+ from PIL import Image, ImageOps
+ import numpy as np
+ import torch
+ from const import MPS_SUPPORT_MODELS
+ from loguru import logger
+ from torch.hub import download_url_to_file, get_dir
+ import hashlib
+ 
+ 
+ def md5sum(filename):
+     md5 = hashlib.md5()
+     with open(filename, "rb") as f:
+         for chunk in iter(lambda: f.read(128 * md5.block_size), b""):
+             md5.update(chunk)
+     return md5.hexdigest()
+ 
+ 
+ def switch_mps_device(model_name, device):
+     if model_name not in MPS_SUPPORT_MODELS and str(device) == "mps":
+         logger.info(f"{model_name} not support mps, switch to cpu")
+         return torch.device("cpu")
+     return device
+ 
+ 
+ def get_cache_path_by_url(url):
+     parts = urlparse(url)
+     hub_dir = get_dir()
+     model_dir = os.path.join(hub_dir, "checkpoints")
+     if not os.path.isdir(model_dir):
+         os.makedirs(model_dir)
+     filename = os.path.basename(parts.path)
+     cached_file = os.path.join(model_dir, filename)
+     return cached_file
+ 
+ 
+ def download_model(url, model_md5: str = None):
+     cached_file = get_cache_path_by_url(url)
+     if not os.path.exists(cached_file):
+         sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
+         hash_prefix = None
+         download_url_to_file(url, cached_file, hash_prefix, progress=True)
+         if model_md5:
+             _md5 = md5sum(cached_file)
+             if model_md5 == _md5:
+                 logger.info(f"Download model success, md5: {_md5}")
+             else:
+                 try:
+                     os.remove(cached_file)
+                     logger.error(
+                         f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner. "
+                         f"If you still have errors, please try downloading the model manually first: https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
+                     )
+                 except Exception:
+                     logger.error(
+                         f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
+                     )
+                 exit(-1)
+ 
+     return cached_file
+ 
+ 
+ def ceil_modulo(x, mod):
+     if x % mod == 0:
+         return x
+     return (x // mod + 1) * mod
+ 
+ 
+ def handle_error(model_path, model_md5, e):
+     _md5 = md5sum(model_path)
+     if _md5 != model_md5:
+         try:
+             os.remove(model_path)
+             logger.error(
+                 f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner. "
+                 f"If you still have errors, please try downloading the model manually first: https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
+             )
+         except Exception:
+             logger.error(
+                 f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
+             )
+     else:
+         logger.error(
+             f"Failed to load model {model_path}, "
+             f"please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}"
+         )
+     exit(-1)
+ 
+ 
+ def load_jit_model(url_or_path, device, model_md5: str):
+     if os.path.exists(url_or_path):
+         model_path = url_or_path
+     else:
+         model_path = download_model(url_or_path, model_md5)
+ 
+     logger.info(f"Loading model from: {model_path}")
+     try:
+         model = torch.jit.load(model_path, map_location="cpu").to(device)
+     except Exception as e:
+         handle_error(model_path, model_md5, e)
+     model.eval()
+     return model
+ 
+ 
+ def load_model(model: torch.nn.Module, url_or_path, device, model_md5):
+     if os.path.exists(url_or_path):
+         model_path = url_or_path
+     else:
+         model_path = download_model(url_or_path, model_md5)
+ 
+     try:
+         logger.info(f"Loading model from: {model_path}")
+         state_dict = torch.load(model_path, map_location="cpu")
+         model.load_state_dict(state_dict, strict=True)
+         model.to(device)
+     except Exception as e:
+         handle_error(model_path, model_md5, e)
+     model.eval()
+     return model
+ 
+ 
+ def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes:
+     data = cv2.imencode(
+         f".{ext}",
+         image_numpy,
+         [int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
+     )[1]
+     image_bytes = data.tobytes()
+     return image_bytes
+ 
+ 
+ def pil_to_bytes(pil_img, ext: str, exif=None) -> bytes:
+     with io.BytesIO() as output:
+         pil_img.save(output, format=ext, exif=exif, quality=95)
+         image_bytes = output.getvalue()
+     return image_bytes
+ 
+ 
+ def load_img(img_bytes, gray: bool = False, return_exif: bool = False):
+     alpha_channel = None
+     image = Image.open(io.BytesIO(img_bytes))
+ 
+     try:
+         if return_exif:
+             exif = image.getexif()
+     except Exception:
+         exif = None
+         logger.error("Failed to extract exif from image")
+ 
+     try:
+         image = ImageOps.exif_transpose(image)
+     except Exception:
+         pass
+ 
+     if gray:
+         image = image.convert("L")
+         np_img = np.array(image)
+     else:
+         if image.mode == "RGBA":
+             np_img = np.array(image)
+             alpha_channel = np_img[:, :, -1]
+             np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB)
+         else:
+             image = image.convert("RGB")
+             np_img = np.array(image)
+ 
+     if return_exif:
+         return np_img, alpha_channel, exif
+     return np_img, alpha_channel
+ 
+ 
+ def norm_img(np_img):
+     if len(np_img.shape) == 2:
+         np_img = np_img[:, :, np.newaxis]
+     np_img = np.transpose(np_img, (2, 0, 1))
+     np_img = np_img.astype("float32") / 255
+     return np_img
+ 
+ 
+ def resize_max_size(
+     np_img, size_limit: int, interpolation=cv2.INTER_CUBIC
+ ) -> np.ndarray:
+     # Resize the image's longer side to size_limit if it is larger than size_limit
+     h, w = np_img.shape[:2]
+     if max(h, w) > size_limit:
+         ratio = size_limit / max(h, w)
+         new_w = int(w * ratio + 0.5)
+         new_h = int(h * ratio + 0.5)
+         return cv2.resize(np_img, dsize=(new_w, new_h), interpolation=interpolation)
+     else:
+         return np_img
+ 
+ 
+ def pad_img_to_modulo(
+     img: np.ndarray, mod: int, square: bool = False, min_size: Optional[int] = None
+ ):
+     """
+ 
+     Args:
+         img: [H, W, C]
+         mod: both output sides are padded up to a multiple of this value
+         square: whether to pad the output to a square
+         min_size: minimum output side length; must be a multiple of mod
+ 
+     Returns:
+ 
+     """
+     if len(img.shape) == 2:
+         img = img[:, :, np.newaxis]
+     height, width = img.shape[:2]
+     out_height = ceil_modulo(height, mod)
+     out_width = ceil_modulo(width, mod)
+ 
+     if min_size is not None:
+         assert min_size % mod == 0
+         out_width = max(min_size, out_width)
+         out_height = max(min_size, out_height)
+ 
+     if square:
+         max_size = max(out_height, out_width)
+         out_height = max_size
+         out_width = max_size
+ 
+     return np.pad(
+         img,
+         ((0, out_height - height), (0, out_width - width), (0, 0)),
+         mode="symmetric",
+     )
+ 
+ 
+ def boxes_from_mask(mask: np.ndarray) -> List[np.ndarray]:
+     """
+     Args:
+         mask: (h, w, 1) 0~255
+ 
+     Returns:
+         List of [x1, y1, x2, y2] boxes, one per external contour
+ 
+     """
+     height, width = mask.shape[:2]
+     _, thresh = cv2.threshold(mask, 127, 255, 0)
+     contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+ 
+     boxes = []
+     for cnt in contours:
+         x, y, w, h = cv2.boundingRect(cnt)
+         box = np.array([x, y, x + w, y + h]).astype(int)
+ 
+         box[::2] = np.clip(box[::2], 0, width)
+         box[1::2] = np.clip(box[1::2], 0, height)
+         boxes.append(box)
+ 
+     return boxes
+ 
+ 
+ def only_keep_largest_contour(mask: np.ndarray) -> np.ndarray:
+     """
+     Args:
+         mask: (h, w) 0~255
+ 
+     Returns:
+         Mask with only the largest contour kept
+ 
+     """
+     _, thresh = cv2.threshold(mask, 127, 255, 0)
+     contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+ 
+     max_area = 0
+     max_index = -1
+     for i, cnt in enumerate(contours):
+         area = cv2.contourArea(cnt)
+         if area > max_area:
+             max_area = area
+             max_index = i
+ 
+     if max_index != -1:
+         new_mask = np.zeros_like(mask)
+         return cv2.drawContours(new_mask, contours, max_index, 255, -1)
+     else:
+         return mask
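A short sketch chaining the helpers above (file names are assumptions): load an image from bytes, pad it so both sides are multiples of 8, and extract the bounding boxes of a mask:

    import cv2
    from helper import load_img, pad_img_to_modulo, boxes_from_mask

    with open("img.png", "rb") as f:
        np_img, alpha_channel = load_img(f.read())

    padded = pad_img_to_modulo(np_img, mod=8)     # symmetric padding up to the next multiple of 8
    print(np_img.shape, "->", padded.shape)

    mask = cv2.imread("img_mask.png", cv2.IMREAD_GRAYSCALE)
    for x1, y1, x2, y2 in boxes_from_mask(mask):  # one box per white region
        print("mask region:", x1, y1, x2, y2)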
interactive_seg.py ADDED
@@ -0,0 +1,203 @@
+ import os
+ 
+ import cv2
+ from typing import Tuple, List
+ import torch
+ import torch.nn.functional as F
+ from loguru import logger
+ from pydantic import BaseModel
+ import numpy as np
+ 
+ from helper import only_keep_largest_contour, load_jit_model
+ 
+ 
+ class Click(BaseModel):
+     # [y, x]
+     coords: Tuple[float, float]
+     is_positive: bool
+     indx: int
+ 
+     @property
+     def coords_and_indx(self):
+         return (*self.coords, self.indx)
+ 
+     def scale(self, y_ratio: float, x_ratio: float) -> 'Click':
+         # coords are stored as [y, x], so the first ratio scales y
+         return Click(
+             coords=(self.coords[0] * y_ratio, self.coords[1] * x_ratio),
+             is_positive=self.is_positive,
+             indx=self.indx
+         )
+ 
+ 
+ class ResizeTrans:
+     def __init__(self, size=480):
+         super().__init__()
+         self.crop_height = size
+         self.crop_width = size
+ 
+     def transform(self, image_nd, clicks_lists):
+         assert image_nd.shape[0] == 1 and len(clicks_lists) == 1
+         image_height, image_width = image_nd.shape[2:4]
+         self.image_height = image_height
+         self.image_width = image_width
+         image_nd_r = F.interpolate(image_nd, (self.crop_height, self.crop_width), mode='bilinear', align_corners=True)
+ 
+         y_ratio = self.crop_height / image_height
+         x_ratio = self.crop_width / image_width
+ 
+         clicks_lists_resized = []
+         for clicks_list in clicks_lists:
+             clicks_list_resized = [click.scale(y_ratio, x_ratio) for click in clicks_list]
+             clicks_lists_resized.append(clicks_list_resized)
+ 
+         return image_nd_r, clicks_lists_resized
+ 
+     def inv_transform(self, prob_map):
+         new_prob_map = F.interpolate(prob_map, (self.image_height, self.image_width), mode='bilinear',
+                                      align_corners=True)
+ 
+         return new_prob_map
+ 
+ 
+ class ISPredictor(object):
+     def __init__(
+         self,
+         model,
+         device,
+         open_kernel_size: int,
+         dilate_kernel_size: int,
+         net_clicks_limit=None,
+         zoom_in=None,
+         infer_size=384,
+     ):
+         self.model = model
+         self.open_kernel_size = open_kernel_size
+         self.dilate_kernel_size = dilate_kernel_size
+         self.net_clicks_limit = net_clicks_limit
+         self.device = device
+         self.zoom_in = zoom_in
+         self.infer_size = infer_size
+ 
+         # self.transforms = [zoom_in] if zoom_in is not None else []
+ 
+     def __call__(self, input_image: torch.Tensor, clicks: List[Click], prev_mask):
+         """
+ 
+         Args:
+             input_image: [1, 3, H, W] [0~1]
+             clicks: List[Click]
+             prev_mask: [1, 1, H, W]
+ 
+         Returns:
+ 
+         """
+         transforms = [ResizeTrans(self.infer_size)]
+         input_image = torch.cat((input_image, prev_mask), dim=1)
+ 
+         # image_nd resized to infer_size
+         for t in transforms:
+             image_nd, clicks_lists = t.transform(input_image, [clicks])
+ 
+         # image_nd.shape = [1, 4, 256, 256]
+         # points_nd.shape = [1, 2, 3]
+         # clicks_lists[0][0] is a Click instance
+         points_nd = self.get_points_nd(clicks_lists)
+         pred_logits = self.model(image_nd, points_nd)
+         pred = torch.sigmoid(pred_logits)
+         pred = self.post_process(pred)
+ 
+         prediction = F.interpolate(pred, mode='bilinear', align_corners=True,
+                                    size=image_nd.size()[2:])
+ 
+         for t in reversed(transforms):
+             prediction = t.inv_transform(prediction)
+ 
+         # if self.zoom_in is not None and self.zoom_in.check_possible_recalculation():
+         #     return self.get_prediction(clicker)
+ 
+         return prediction.cpu().numpy()[0, 0]
+ 
+     def post_process(self, pred: torch.Tensor) -> torch.Tensor:
+         pred_mask = pred.cpu().numpy()[0][0]
+         # morph_open to remove small noise
+         kernel_size = self.open_kernel_size
+         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
+         pred_mask = cv2.morphologyEx(pred_mask, cv2.MORPH_OPEN, kernel, iterations=1)
+ 
+         # Why dilate: make the region slightly larger to avoid missing pixels; this generally works better
+         dilate_kernel_size = self.dilate_kernel_size
+         if dilate_kernel_size > 1:
+             # getStructuringElement expects a shape constant; the original code passed the
+             # cv2.MORPH_DILATE op constant here, which aliases cv2.MORPH_CROSS
+             kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_kernel_size, dilate_kernel_size))
+             pred_mask = cv2.dilate(pred_mask, kernel, iterations=1)
+         return torch.from_numpy(pred_mask).unsqueeze(0).unsqueeze(0)
+ 
+     def get_points_nd(self, clicks_lists):
+         total_clicks = []
+         num_pos_clicks = [sum(x.is_positive for x in clicks_list) for clicks_list in clicks_lists]
+         num_neg_clicks = [len(clicks_list) - num_pos for clicks_list, num_pos in zip(clicks_lists, num_pos_clicks)]
+         num_max_points = max(num_pos_clicks + num_neg_clicks)
+         if self.net_clicks_limit is not None:
+             num_max_points = min(self.net_clicks_limit, num_max_points)
+         num_max_points = max(1, num_max_points)
+ 
+         for clicks_list in clicks_lists:
+             clicks_list = clicks_list[:self.net_clicks_limit]
+             pos_clicks = [click.coords_and_indx for click in clicks_list if click.is_positive]
+             pos_clicks = pos_clicks + (num_max_points - len(pos_clicks)) * [(-1, -1, -1)]
+ 
+             neg_clicks = [click.coords_and_indx for click in clicks_list if not click.is_positive]
+             neg_clicks = neg_clicks + (num_max_points - len(neg_clicks)) * [(-1, -1, -1)]
+             total_clicks.append(pos_clicks + neg_clicks)
+ 
+         return torch.tensor(total_clicks, device=self.device)
+ 
+ 
+ INTERACTIVE_SEG_MODEL_URL = os.environ.get(
+     "INTERACTIVE_SEG_MODEL_URL",
+     "https://github.com/Sanster/models/releases/download/clickseg_pplnet/clickseg_pplnet.pt",
+ )
+ INTERACTIVE_SEG_MODEL_MD5 = os.environ.get("INTERACTIVE_SEG_MODEL_MD5", "8ca44b6e02bca78f62ec26a3c32376cf")
+ 
+ 
+ class InteractiveSeg:
+     def __init__(self, infer_size=384, open_kernel_size=3, dilate_kernel_size=3):
+         device = torch.device('cpu')
+         model = load_jit_model(INTERACTIVE_SEG_MODEL_URL, device, INTERACTIVE_SEG_MODEL_MD5).eval()
+         self.predictor = ISPredictor(model, device,
+                                      infer_size=infer_size,
+                                      open_kernel_size=open_kernel_size,
+                                      dilate_kernel_size=dilate_kernel_size)
+ 
+     def __call__(self, image, clicks, prev_mask=None):
+         """
+ 
+         Args:
+             image: [H,W,C] RGB
+             clicks: List[Click]
+ 
+         Returns:
+             RGBA mask tinted with the frontend brush color
+ 
+         """
+         image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+         image = torch.from_numpy((image / 255).transpose(2, 0, 1)).unsqueeze(0).float()
+         if prev_mask is None:
+             mask = torch.zeros_like(image[:, :1, :, :])
+         else:
+             logger.info('InteractiveSeg run with prev_mask')
+             mask = torch.from_numpy(prev_mask / 255).unsqueeze(0).unsqueeze(0).float()
+ 
+         pred_probs = self.predictor(image, clicks, mask)
+         pred_mask = pred_probs > 0.5
+         pred_mask = (pred_mask * 255).astype(np.uint8)
+ 
+         # Find largest contour
+         # pred_mask = only_keep_largest_contour(pred_mask)
+         # To simplify frontend processing, add the mask brush color here
+         fg = pred_mask == 255
+         bg = pred_mask != 255
+         pred_mask = cv2.cvtColor(pred_mask, cv2.COLOR_GRAY2BGRA)
+         # frontend brush color "ffcc00bb"
+         pred_mask[bg] = 0
+         pred_mask[fg] = [255, 203, 0, int(255 * 0.73)]
+         pred_mask = cv2.cvtColor(pred_mask, cv2.COLOR_BGRA2RGBA)
+         return pred_mask
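A usage sketch, assuming an RGB numpy image and a single positive click (coords are [y, x]); file names are illustrative:

    import cv2
    from interactive_seg import InteractiveSeg, Click

    seg = InteractiveSeg()  # downloads clickseg_pplnet.pt into the torch hub cache on first use
    image = cv2.cvtColor(cv2.imread("img.png"), cv2.COLOR_BGR2RGB)
    clicks = [Click(coords=(120.0, 200.0), is_positive=True, indx=0)]
    rgba_mask = seg(image, clicks)  # RGBA mask tinted with the frontend brush color
    cv2.imwrite("seg_mask.png", cv2.cvtColor(rgba_mask, cv2.COLOR_RGBA2BGRA))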
make_gif.py ADDED
@@ -0,0 +1,125 @@
+ import io
+ import math
+ from pathlib import Path
+ 
+ from PIL import Image, ImageDraw
+ 
+ 
+ def keep_ratio_resize(img, size, resample=Image.BILINEAR):
+     if img.width > img.height:
+         w = size
+         h = int(img.height * size / img.width)
+     else:
+         h = size
+         w = int(img.width * size / img.height)
+     return img.resize((w, h), resample)
+ 
+ 
+ def cubic_bezier(p1, p2, duration: int, frames: int):
+     """
+ 
+     Args:
+         p1: first control point of the curve
+         p2: second control point of the curve
+         duration: step used when sampling t
+         frames: number of samples along the curve
+ 
+     Returns:
+         List of (x, y) points on the curve
+ 
+     """
+     x0, y0 = (0, 0)
+     x1, y1 = p1
+     x2, y2 = p2
+     x3, y3 = (1, 1)
+ 
+     def cal_y(t):
+         return math.pow(1 - t, 3) * y0 + \
+                3 * math.pow(1 - t, 2) * t * y1 + \
+                3 * (1 - t) * math.pow(t, 2) * y2 + \
+                math.pow(t, 3) * y3
+ 
+     def cal_x(t):
+         return math.pow(1 - t, 3) * x0 + \
+                3 * math.pow(1 - t, 2) * t * x1 + \
+                3 * (1 - t) * math.pow(t, 2) * x2 + \
+                math.pow(t, 3) * x3
+ 
+     res = []
+     for t in range(0, 1 * frames, duration):
+         t = t / frames
+         res.append((cal_x(t), cal_y(t)))
+ 
+     # append the end point; only the x component is used by the caller
+     res.append((1, 0))
+     return res
+ 
+ 
+ def make_compare_gif(
+     clean_img: Image.Image,
+     src_img: Image.Image,
+     max_side_length: int = 600,
+     splitter_width: int = 5,
+     splitter_color=(255, 203, 0, int(255 * 0.73))
+ ):
+     if clean_img.size != src_img.size:
+         clean_img = clean_img.resize(src_img.size, Image.BILINEAR)
+ 
+     duration_per_frame = 20
+     num_frames = 50
+     # ease-in-out
+     cubic_bezier_points = cubic_bezier((0.33, 0), (0.66, 1), 1, num_frames)
+     cubic_bezier_points.reverse()
+ 
+     max_side_length = min(max_side_length, max(clean_img.size))
+ 
+     src_img = keep_ratio_resize(src_img, max_side_length)
+     clean_img = keep_ratio_resize(clean_img, max_side_length)
+     width, height = src_img.size
+ 
+     # Generate the frames that sweep the GIF from right to left
+     images = []
+ 
+     for i in range(num_frames):
+         new_frame = Image.new('RGB', (width, height))
+         new_frame.paste(clean_img, (0, 0))
+ 
+         left = int(cubic_bezier_points[i][0] * width)
+         cropped_src_img = src_img.crop((left, 0, width, height))
+         new_frame.paste(cropped_src_img, (left, 0, width, height))
+         if i != num_frames - 1:
+             # draw a yellow splitter on the edge of the cropped image
+             draw = ImageDraw.Draw(new_frame)
+             draw.line([(left, 0), (left, height)], width=splitter_width, fill=splitter_color)
+         images.append(new_frame)
+ 
+     for i in range(10):
+         images.append(src_img)
+ 
+     cubic_bezier_points.reverse()
+     # Generate the frames that sweep the GIF from left to right
+     for i in range(num_frames):
+         new_frame = Image.new('RGB', (width, height))
+         new_frame.paste(src_img, (0, 0))
+ 
+         right = int(cubic_bezier_points[i][0] * width)
+         cropped_src_img = clean_img.crop((0, 0, right, height))
+         new_frame.paste(cropped_src_img, (0, 0, right, height))
+         if i != num_frames - 1:
+             # draw a yellow splitter on the edge of the cropped image
+             draw = ImageDraw.Draw(new_frame)
+             draw.line([(right, 0), (right, height)], width=splitter_width, fill=splitter_color)
+         images.append(new_frame)
+ 
+     images.append(clean_img)
+ 
+     img_byte_arr = io.BytesIO()
+     clean_img.save(
+         img_byte_arr,
+         format='GIF',
+         save_all=True,
+         include_color_table=True,
+         append_images=images,
+         optimize=False,
+         duration=duration_per_frame,
+         loop=0
+     )
+     return img_byte_arr.getvalue()
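A sketch of producing the before/after comparison GIF with make_compare_gif (file paths are assumptions):

    from PIL import Image
    from make_gif import make_compare_gif

    src = Image.open("img.png")
    clean = Image.open("img_clean.png")
    gif_bytes = make_compare_gif(clean_img=clean, src_img=src)
    with open("compare.gif", "wb") as f:
        f.write(gif_bytes)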
model/__init__.py ADDED
File without changes
model/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (160 Bytes). View file
 
model/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (160 Bytes). View file
 
model/__pycache__/base.cpython-38.pyc ADDED
Binary file (7.82 kB). View file
 
model/__pycache__/base.cpython-39.pyc ADDED
Binary file (7.8 kB). View file
 
model/__pycache__/fcf.cpython-38.pyc ADDED
Binary file (34.2 kB). View file
 
model/__pycache__/instruct_pix2pix.cpython-38.pyc ADDED
Binary file (2.78 kB). View file
 
model/__pycache__/lama.cpython-38.pyc ADDED
Binary file (1.84 kB). View file