kbora committed on
Commit
6af7294
1 Parent(s): b7d1bae

Upload 51 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. Dockerfile +20 -0
  2. README.md +0 -10
  3. __pycache__/app.cpython-310.pyc +0 -0
  4. app.py +5 -0
  5. blocks/.DS_Store +0 -0
  6. blocks/__init__.py +17 -0
  7. blocks/__pycache__/__init__.cpython-310.pyc +0 -0
  8. blocks/__pycache__/__init__.cpython-39.pyc +0 -0
  9. blocks/__pycache__/download.cpython-310.pyc +0 -0
  10. blocks/__pycache__/download.cpython-39.pyc +0 -0
  11. blocks/__pycache__/img2img.cpython-39.pyc +0 -0
  12. blocks/__pycache__/inpainting.cpython-310.pyc +0 -0
  13. blocks/__pycache__/inpainting.cpython-39.pyc +0 -0
  14. blocks/__pycache__/main.cpython-310.pyc +0 -0
  15. blocks/__pycache__/main.cpython-39.pyc +0 -0
  16. blocks/__pycache__/text2img.cpython-39.pyc +0 -0
  17. blocks/download.py +400 -0
  18. blocks/img2img.py +204 -0
  19. blocks/inpainting.py +219 -0
  20. blocks/main.py +31 -0
  21. blocks/text2img.py +235 -0
  22. blocks/utils/__init__.py +0 -0
  23. blocks/utils/__pycache__/__init__.cpython-39.pyc +0 -0
  24. blocks/utils/__pycache__/device.cpython-39.pyc +0 -0
  25. blocks/utils/__pycache__/prompt2prompt.cpython-39.pyc +0 -0
  26. blocks/utils/__pycache__/schedulers.cpython-39.pyc +0 -0
  27. blocks/utils/device.py +16 -0
  28. blocks/utils/prompt2prompt.py +23 -0
  29. blocks/utils/schedulers.py +47 -0
  30. diffmodels/__init__.py +25 -0
  31. diffmodels/__pycache__/__init__.cpython-310.pyc +0 -0
  32. diffmodels/__pycache__/__init__.cpython-39.pyc +0 -0
  33. diffmodels/__pycache__/diffusion_utils.cpython-310.pyc +0 -0
  34. diffmodels/__pycache__/diffusion_utils.cpython-39.pyc +0 -0
  35. diffmodels/__pycache__/simple_diffusion.cpython-310.pyc +0 -0
  36. diffmodels/diffusion_utils.py +218 -0
  37. diffmodels/simple_diffusion.py +309 -0
  38. diffmodels/textual_inversion.py +269 -0
  39. image_0.png +0 -0
  40. requirements.txt +80 -0
  41. static/load_from_artwork.js +46 -0
  42. static/save_artwork.js +63 -0
  43. utils/__init__.py +0 -0
  44. utils/__pycache__/__init__.cpython-310.pyc +0 -0
  45. utils/__pycache__/device.cpython-310.pyc +0 -0
  46. utils/__pycache__/image.cpython-310.pyc +0 -0
  47. utils/__pycache__/log.cpython-310.pyc +0 -0
  48. utils/__pycache__/prompt2prompt.cpython-310.pyc +0 -0
  49. utils/device.py +22 -0
  50. utils/image.py +29 -0
Dockerfile ADDED
@@ -0,0 +1,20 @@
+ # Use an official CUDA runtime as a parent image
+ FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04
+
+ # Set the working directory in the container
+ WORKDIR /usr/src/app
+
+ # Install Python
+ RUN apt-get update && apt-get install -y python3.8 python3-pip
+
+ # Copy the current directory contents into the container at /usr/src/app
+ COPY . /usr/src/app
+
+ # Install any needed packages specified in requirements.txt
+ RUN pip3 install --no-cache-dir -r requirements.txt
+
+ # Make port 7860 available to the world outside this container
+ EXPOSE 7860
+
+ # Run app.py when the container launches
+ CMD ["python3", "app.py"]
README.md CHANGED
@@ -1,10 +0,0 @@
- ---
- title: Minerva Generate Docker
- emoji: 😻
- colorFrom: red
- colorTo: purple
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/app.cpython-310.pyc ADDED
Binary file (255 Bytes).
 
app.py ADDED
@@ -0,0 +1,5 @@
+ import gradio as gr
+ from blocks.main import main_box
+ blocks = main_box()
+ blocks.launch(share=True)
+
blocks/.DS_Store ADDED
Binary file (6.15 kB).
 
blocks/__init__.py ADDED
@@ -0,0 +1,17 @@
+ IMG2IMG_MODEL_LIST = {
+     "StableDiffusion 1.5": "runwayml/stable-diffusion-v1-5",
+     "StableDiffusion 2.1": "stabilityai/stable-diffusion-2-1",
+     "OpenJourney v4": "prompthero/openjourney-v4",
+     "DreamLike 1.0": "dreamlike-art/dreamlike-diffusion-1.0",
+     "DreamLike 2.0": "dreamlike-art/dreamlike-photoreal-2.0"
+ }
+
+ TEXT2IMG_MODEL_LIST = {
+     "OpenJourney v4": "prompthero/openjourney-v4",
+     "StableDiffusion 1.5": "runwayml/stable-diffusion-v1-5",
+     "StableDiffusion 2.1": "stabilityai/stable-diffusion-2-1",
+     "DreamLike 1.0": "dreamlike-art/dreamlike-diffusion-1.0",
+     "DreamLike 2.0": "dreamlike-art/dreamlike-photoreal-2.0",
+     "DreamShaper": "Lykon/DreamShaper",
+     "NeverEnding-Dream": "Lykon/NeverEnding-Dream"
+ }
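For reference, the dictionaries above map the human-readable names shown in the Gradio model dropdowns to Hugging Face repo ids; the load_model() methods added in this commit resolve a dropdown choice the same way. A minimal sketch (the chosen name is only an example):

    from blocks import TEXT2IMG_MODEL_LIST

    choice = "OpenJourney v4"              # value coming from the model dropdown
    repo_id = TEXT2IMG_MODEL_LIST[choice]  # -> "prompthero/openjourney-v4"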
blocks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (631 Bytes).

blocks/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (629 Bytes).

blocks/__pycache__/download.cpython-310.pyc ADDED
Binary file (17.1 kB).

blocks/__pycache__/download.cpython-39.pyc ADDED
Binary file (29.7 kB).

blocks/__pycache__/img2img.cpython-39.pyc ADDED
Binary file (5 kB).

blocks/__pycache__/inpainting.cpython-310.pyc ADDED
Binary file (4.83 kB).

blocks/__pycache__/inpainting.cpython-39.pyc ADDED
Binary file (5.26 kB).

blocks/__pycache__/main.cpython-310.pyc ADDED
Binary file (1.43 kB).

blocks/__pycache__/main.cpython-39.pyc ADDED
Binary file (1.42 kB).

blocks/__pycache__/text2img.cpython-39.pyc ADDED
Binary file (5.12 kB).
 
blocks/download.py ADDED
@@ -0,0 +1,400 @@
1
+ community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
+ </svg>"""
5
+
6
+ def get_community_loading_icon(task = "text2img"):
7
+ if task == "text2img":
8
+ community_icon = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
9
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
10
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
11
+ </svg>"""
12
+ loading_icon = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
13
+ style="color: #ffffff;
14
+ "
15
+ xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
16
+
17
+ elif task == "img2img":
18
+ community_icon = """<svg id="share-btn-share-icon-img2img" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
19
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
20
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
21
+ </svg>"""
22
+ loading_icon = """<svg id="share-btn-loading-icon-img2img" style="display:none;" class="animate-spin"
23
+ style="color: #ffffff;
24
+ "
25
+ xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
26
+
27
+ elif task == "inpainting":
28
+ community_icon = """<svg id="share-btn-share-icon-inpainting" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
29
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
30
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
31
+ </svg>"""
32
+ loading_icon = """<svg id="share-btn-loading-icon-inpainting" style="display:none;" class="animate-spin"
33
+ style="color: #ffffff;
34
+ "
35
+ xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
36
+ return community_icon, loading_icon
37
+
38
+ loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
39
+ style="color: #ffffff;
40
+ "
41
+ xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
42
+
43
+ CSS = """
44
+ #col-container {margin-left: auto; margin-right: auto;}
45
+ a {text-decoration-line: underline; font-weight: 600;}
46
+ .animate-spin {
47
+ animation: spin 1s linear infinite;
48
+ }
49
+ @keyframes spin {
50
+ from { transform: rotate(0deg); }
51
+ to { transform: rotate(360deg); }
52
+ }
53
+ .gradio-container {
54
+ font-family: 'IBM Plex Sans', sans-serif;
55
+ }
56
+ .gr-button {
57
+ color: white;
58
+ border-color: black;
59
+ background: black;
60
+ }
61
+ input[type='range'] {
62
+ accent-color: black;
63
+ }
64
+ .dark input[type='range'] {
65
+ accent-color: #dfdfdf;
66
+ }
67
+ .container {
68
+ max-width: 730px;
69
+ margin: auto;
70
+ padding-top: 1.5rem;
71
+ }
72
+ #gallery {
73
+ min-height: 22rem;
74
+ margin-bottom: 15px;
75
+ margin-left: auto;
76
+ margin-right: auto;
77
+ border-bottom-right-radius: .5rem !important;
78
+ border-bottom-left-radius: .5rem !important;
79
+ }
80
+ #gallery>div>.h-full {
81
+ min-height: 20rem;
82
+ }
83
+ .details:hover {
84
+ text-decoration: underline;
85
+ }
86
+ .gr-button {
87
+ white-space: nowrap;
88
+ }
89
+ .gr-button:focus {
90
+ border-color: rgb(147 197 253 / var(--tw-border-opacity));
91
+ outline: none;
92
+ box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
93
+ --tw-border-opacity: 1;
94
+ --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
95
+ --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
96
+ --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
97
+ --tw-ring-opacity: .5;
98
+ }
99
+ #advanced-btn {
100
+ font-size: .7rem !important;
101
+ line-height: 19px;
102
+ margin-top: 12px;
103
+ margin-bottom: 12px;
104
+ padding: 2px 8px;
105
+ border-radius: 14px !important;
106
+ }
107
+ #advanced-options {
108
+ display: none;
109
+ margin-bottom: 20px;
110
+ }
111
+ .footer {
112
+ margin-bottom: 45px;
113
+ margin-top: 35px;
114
+ text-align: center;
115
+ border-bottom: 1px solid #e5e5e5;
116
+ }
117
+ .footer>p {
118
+ font-size: .8rem;
119
+ display: inline-block;
120
+ padding: 0 10px;
121
+ transform: translateY(10px);
122
+ background: white;
123
+ }
124
+ .dark .footer {
125
+ border-color: #303030;
126
+ }
127
+ .dark .footer>p {
128
+ background: #0b0f19;
129
+ }
130
+ .acknowledgments h4{
131
+ margin: 1.25em 0 .25em 0;
132
+ font-weight: bold;
133
+ font-size: 115%;
134
+ }
135
+ .animate-spin {
136
+ animation: spin 1s linear infinite;
137
+ }
138
+ @keyframes spin {
139
+ from {
140
+ transform: rotate(0deg);
141
+ }
142
+ to {
143
+ transform: rotate(360deg);
144
+ }
145
+ }
146
+ #share-btn-container {
147
+ display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
148
+ margin-top: 10px;
149
+ margin-left: auto;
150
+ margin-right: auto;
151
+ }
152
+ #share-btn {
153
+ all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
154
+ }
155
+ #share-btn * {
156
+ all: unset;
157
+ }
158
+
159
+ #share-btn-inpainting {
160
+ all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
161
+ }
162
+ #share-btn-inpainting * {
163
+ all: unset;
164
+ }
165
+
166
+ #share-btn-img2img {
167
+ all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
168
+ }
169
+ #share-btn-img2img * {
170
+ all: unset;
171
+ }
172
+ #share-btn-container div:nth-child(-n+2){
173
+ width: auto !important;
174
+ min-height: 0px !important;
175
+ }
176
+ #share-btn-container .wrap {
177
+ display: none !important;
178
+ }
179
+
180
+ #download-btn-container {
181
+ display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
182
+ margin-top: 10px;
183
+ margin-left: auto;
184
+ margin-right: auto;
185
+ }
186
+ #download-btn {
187
+ all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
188
+ }
189
+ #download-btn * {
190
+ all: unset;
191
+ }
192
+ #download-btn-container div:nth-child(-n+2){
193
+ width: auto !important;
194
+ min-height: 0px !important;
195
+ }
196
+ #download-btn-container .wrap {
197
+ display: none !important;
198
+ }
199
+
200
+ .gr-form{
201
+ flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
202
+ }
203
+ #prompt-container{
204
+ gap: 0;
205
+ }
206
+
207
+ #prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
208
+ #component-16{border-top-width: 1px!important;margin-top: 1em}
209
+ .image_duplication{position: absolute; width: 100px; left: 50px}
210
+ """
211
+
212
+ # window.addEventListener('message', function (event) {
213
+ # if (event.origin !== 'http://127.0.0.1:5000'){
214
+ # console.log('Origin not allowed');
215
+ # return;
216
+ # }
217
+ # userId = event.data.userId;
218
+ # console.log('User ID received from parent:', userId);
219
+ # });
220
+
221
+ def get_share_js():
222
+ share_js = """
223
+ async () => {
224
+ // Get the username from the URL itself
225
+ const urlParams = new URLSearchParams(window.location.search);
226
+ const username = urlParams.get('username');
227
+
228
+ async function uploadFile(
229
+ file,
230
+ _meta_prompt,
231
+ _meta_negative_prompt,
232
+ _meta_model_name,
233
+ _meta_scheduler_name,
234
+ _meta_model_guidance_scale,
235
+ _meta_model_num_steps,
236
+ _meta_model_image_size,
237
+ _meta_seed,
238
+ _meta_mask = null,
239
+ _meta_reference_image = null,
240
+ ){
241
+ const UPLOAD_URL = 'http://127.0.0.1:5000/v1/api/upload-image';
242
+ const formData = new FormData();
243
+ formData.append('file', file);
244
+
245
+ // Add the meta data headers to the form data
246
+ formData.append('text_prompt', _meta_prompt);
247
+ formData.append('negative_prompt', _meta_negative_prompt);
248
+ formData.append('model_name', _meta_model_name);
249
+ formData.append('model_guidance_scale', _meta_model_guidance_scale);
250
+ formData.append('model_num_steps', _meta_model_num_steps);
251
+ formData.append('scheduler_name', _meta_scheduler_name);
252
+ formData.append('seed', _meta_seed);
253
+ formData.append('model_image_size', _meta_model_image_size);
254
+
255
+ // Add the optional meta data headers to the form data
256
+ if(_meta_mask){
257
+ formData.append('mask', _meta_mask);
258
+ }
259
+ if(_meta_reference_image){
260
+ formData.append('reference_image', _meta_reference_image);
261
+ }
262
+
263
+ formData.append('username',username); // This is constant for all the images
264
+ const response = await fetch(UPLOAD_URL, {
265
+ method: 'POST',
266
+ headers: {
267
+ 'X-Requested-With': 'XMLHttpRequest',
268
+ },
269
+ body: formData,
270
+ });
271
+ const url = await response.text(); // This returns the URL of the uploaded file (S3) bucket
272
+ return url;
273
+ }
274
+
275
+ const gradioEl = document.querySelector('gradio-app');
276
+ const imgEls = gradioEl.querySelectorAll('#gallery img');
277
+
278
+ // Get the necessary fields
279
+ const promptTxt = gradioEl.querySelector('#prompt-text-input textarea').value;
280
+ const negativePromptTxt = gradioEl.querySelector('#negative-prompt-text-input textarea').value;
281
+
282
+ console.log(promptTxt);
283
+ console.log(negativePromptTxt);
284
+
285
+ // Get values from the sliders
286
+ const modelGuidanceScale = parseFloat(gradioEl.querySelector('#guidance-scale-slider input').value);
287
+ console.log(modelGuidanceScale);
288
+
289
+ const numSteps = parseInt(gradioEl.querySelector('#num-inference-step-slider input').value);
290
+ const imageSize = parseInt(gradioEl.querySelector('#image-size-slider input').value);
291
+ const seed = parseInt(gradioEl.querySelector('#seed-slider input').value);
292
+
293
+ console.log(numSteps);
294
+ console.log(imageSize);
295
+ console.log(seed);
296
+
297
+ // Get the values from dropdowns
298
+ const modelName = gradioEl.querySelector('#model-dropdown input').value;
299
+ const schedulerName = gradioEl.querySelector('#scheduler-dropdown input').value;
300
+
301
+ console.log(modelName);
302
+ console.log(schedulerName);
303
+
304
+ const shareBtnEl = gradioEl.querySelector('#share-btn');
305
+ const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
306
+ const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
307
+
308
+ if(!imgEls.length){
309
+ return;
310
+ };
311
+
312
+ shareBtnEl.style.pointerEvents = 'none';
313
+ shareIconEl.style.display = 'none';
314
+ loadingIconEl.style.removeProperty('display');
315
+ const files = await Promise.all(
316
+ [...imgEls].map(async (imgEl) => {
317
+ const res = await fetch(imgEl.src);
318
+ const blob = await res.blob();
319
+ const fileSrc = imgEl.src.split('/').pop(); // Get the file name from the img src path
320
+ const imgId = Date.now();
321
+ const fileName = `${fileSrc}-${imgId}.jpg`; // Fixed fileName construction
322
+ return new File([blob], fileName, { type: 'image/jpeg' });
323
+ })
324
+ );
325
+
326
+ // Ensure that only one image is uploaded by taking the first element if there are multiple
327
+ if (files.length > 1) {
328
+ files.splice(1, files.length - 1);
329
+ }
330
+
331
+ const urls = await Promise.all(files.map((f) => uploadFile(
332
+ f,
333
+ promptTxt,
334
+ negativePromptTxt,
335
+ modelName,
336
+ schedulerName,
337
+ modelGuidanceScale,
338
+ numSteps,
339
+ imageSize,
340
+ seed,
341
+ )));
342
+ const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
343
+
344
+ shareBtnEl.style.removeProperty('pointer-events');
345
+ shareIconEl.style.removeProperty('display');
346
+ loadingIconEl.style.display = 'none';
347
+ }
348
+ """
349
+ return share_js
350
+
351
+ def get_load_from_artwork_js():
352
+ load_artwork_js = """
353
+ async () => {
354
+ const urlParams = new URLSearchParams(window.location.search);
355
+ const username = urlParams.get('username');
356
+ const artworkId = urlParams.get('artworkId');
357
+
358
+ const LOAD_URL = `http://127.0.0.1:5000/v1/api/load-parameters?artworkId=${artworkId}`;
359
+ const response = await fetch(LOAD_URL, {
360
+ method: 'GET',
361
+ headers: {
362
+ 'X-Requested-With': 'XMLHttpRequest',
363
+ }
364
+ });
365
+
366
+ // Check if the response is okay
367
+ if (!response.ok) {
368
+ console.error("An error occurred while fetching the parameters.");
369
+ return;
370
+ }
371
+
372
+ const parameters = await response.json(); // Assuming you're getting a JSON response
373
+
374
+ // Get the necessary elements
375
+ const gradioEl = document.querySelector('gradio-app');
376
+ const promptInput = gradioEl.querySelector('#prompt-text-input textarea');
377
+ const negativePromptInput = gradioEl.querySelector('#negative-prompt-text-input textarea');
378
+
379
+ // Get the slider inputs
380
+ const guidanceScaleInput = gradioEl.querySelector('#guidance-scale-slider input');
381
+ const numInferenceStepInput = gradioEl.querySelector('#num-inference-step-slider input');
382
+ const imageSizeInput = gradioEl.querySelector('#image-size-slider input');
383
+ const seedInput = gradioEl.querySelector('#seed-slider input');
384
+
385
+ // Get the dropdown inputs
386
+ const modelDropdown = gradioEl.querySelector('#model-dropdown input');
387
+ const schedulerDropdown = gradioEl.querySelector('#scheduler-dropdown input');
388
+
389
+ // Set the values based on the parameters received
390
+ promptInput.value = parameters.text_prompt;
391
+ negativePromptInput.value = parameters.negative_prompt;
392
+ guidanceScaleInput.value = parameters.model_guidance_scale;
393
+ numInferenceStepInput.value = parameters.model_num_steps;
394
+ imageSizeInput.value = parameters.model_image_size;
395
+ seedInput.value = parameters.seed;
396
+ modelDropdown.value = parameters.model_name;
397
+ schedulerDropdown.value = parameters.scheduler_name;
398
+ }
399
+ """
400
+ return load_artwork_js
blocks/img2img.py ADDED
@@ -0,0 +1,204 @@
1
+ import gradio as gr
2
+ import torch
3
+ from diffusers import StableDiffusionImg2ImgPipeline
4
+ from .utils.schedulers import SCHEDULER_LIST, get_scheduler_list
5
+ from .utils.prompt2prompt import generate
6
+ from .utils.device import get_device
7
+ from PIL import Image
8
+ from .download import get_share_js, get_community_loading_icon, CSS
9
+
10
+ IMG2IMG_MODEL_LIST = {
11
+ "StableDiffusion 1.5" : "runwayml/stable-diffusion-v1-5",
12
+ "StableDiffusion 2.1" : "stabilityai/stable-diffusion-2-1",
13
+ "OpenJourney v4" : "prompthero/openjourney-v4",
14
+ "DreamLike 1.0" : "dreamlike-art/dreamlike-diffusion-1.0",
15
+ "DreamLike 2.0" : "dreamlike-art/dreamlike-photoreal-2.0"
16
+ }
17
+
18
+ class StableDiffusionImage2ImageGenerator:
19
+ def __init__(self):
20
+ self.pipe = None
21
+
22
+ def load_model(self, model_path, scheduler):
23
+ model_path = IMG2IMG_MODEL_LIST[model_path]
24
+ if self.pipe is None:
25
+ self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
26
+ model_path, safety_checker=None, torch_dtype=torch.float32
27
+ )
28
+
29
+ device = get_device()
30
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
31
+
32
+ self.pipe.to(device)
33
+ #self.pipe.enable_attention_slicing()
34
+
35
+ return self.pipe
36
+
37
+ def generate_image(
38
+ self,
39
+ image_path: str,
40
+ model_path: str,
41
+ prompt: str,
42
+ negative_prompt: str,
43
+ num_images_per_prompt: int,
44
+ scheduler: str,
45
+ guidance_scale: int,
46
+ num_inference_step: int,
47
+ seed_generator=0,
48
+ ):
49
+ pipe = self.load_model(
50
+ model_path=model_path,
51
+ scheduler=scheduler,
52
+ )
53
+
54
+ if seed_generator == 0:
55
+ random_seed = torch.randint(0, 1000000, (1,))
56
+ generator = torch.manual_seed(random_seed)
57
+ else:
58
+ generator = torch.manual_seed(seed_generator)
59
+
60
+ image = Image.open(image_path)
61
+ images = pipe(
62
+ prompt,
63
+ image=image,
64
+ negative_prompt=negative_prompt,
65
+ num_images_per_prompt=num_images_per_prompt,
66
+ num_inference_steps=num_inference_step,
67
+ guidance_scale=guidance_scale,
68
+ generator=generator,
69
+ ).images
70
+
71
+ return images
72
+
73
+ def app():
74
+ demo = gr.Blocks(css=CSS)
75
+ with demo:
76
+ with gr.Row():
77
+ with gr.Column():
78
+ image2image_image_file = gr.Image(
79
+ type="filepath", label="Upload",elem_id="image-upload-img2img"
80
+ ).style(height=260)
81
+
82
+ image2image_prompt = gr.Textbox(
83
+ lines=1,
84
+ placeholder="Prompt",
85
+ show_label=False,
86
+ elem_id="prompt-text-input-img2img",
87
+ value=''
88
+ )
89
+
90
+ image2image_negative_prompt = gr.Textbox(
91
+ lines=1,
92
+ placeholder="Negative Prompt",
93
+ show_label=False,
94
+ elem_id = "negative-prompt-text-input-img2img",
95
+ value=''
96
+ )
97
+
98
+ # add button for generating a prompt from the prompt
99
+ image2image_generate_prompt_button = gr.Button(
100
+ label="Generate Prompt",
101
+ type="primary",
102
+ align="center",
103
+ value = "Generate Prompt"
104
+ )
105
+
106
+ # show a text box with the generated prompt
107
+ image2image_generated_prompt = gr.Textbox(
108
+ lines=1,
109
+ placeholder="Generated Prompt",
110
+ show_label=False,
111
+ )
112
+
113
+ with gr.Row():
114
+ with gr.Column():
115
+ image2image_model_path = gr.Dropdown(
116
+ choices=list(IMG2IMG_MODEL_LIST.keys()),
117
+ value=list(IMG2IMG_MODEL_LIST.keys())[0],
118
+ label="Imaget2Image Model Selection",
119
+ elem_id="model-dropdown-img2img",
120
+ )
121
+
122
+ image2image_guidance_scale = gr.Slider(
123
+ minimum=0.1,
124
+ maximum=15,
125
+ step=0.1,
126
+ value=7.5,
127
+ label="Guidance Scale",
128
+ elem_id = "guidance-scale-slider-img2img"
129
+ )
130
+
131
+ image2image_num_inference_step = gr.Slider(
132
+ minimum=1,
133
+ maximum=100,
134
+ step=1,
135
+ value=50,
136
+ label="Num Inference Step",
137
+ elem_id = "num-inference-step-slider-img2img"
138
+ )
139
+ with gr.Row():
140
+ with gr.Column():
141
+ image2image_scheduler = gr.Dropdown(
142
+ choices=SCHEDULER_LIST,
143
+ value=SCHEDULER_LIST[0],
144
+ label="Scheduler",
145
+ elem_id="scheduler-dropdown-img2img",
146
+ )
147
+ image2image_num_images_per_prompt = gr.Slider(
148
+ minimum=1,
149
+ maximum=30,
150
+ step=1,
151
+ value=1,
152
+ label="Number Of Images",
153
+ )
154
+
155
+ image2image_seed_generator = gr.Slider(
156
+ label="Seed(0 for random)",
157
+ minimum=0,
158
+ maximum=1000000,
159
+ value=0,
160
+ elem_id="seed-slider-img2img",
161
+ )
162
+
163
+ image2image_predict_button = gr.Button(value="Generator")
164
+
165
+ with gr.Column():
166
+ output_image = gr.Gallery(
167
+ label="Generated images",
168
+ show_label=False,
169
+ elem_id="gallery",
170
+ ).style(grid=(1, 2))
171
+
172
+ with gr.Group(elem_id="container-advanced-btns"):
173
+ with gr.Group(elem_id="share-btn-container"):
174
+ community_icon_html, loading_icon_html = get_community_loading_icon("img2img")
175
+ community_icon = gr.HTML(community_icon_html)
176
+ loading_icon = gr.HTML(loading_icon_html)
177
+ share_button = gr.Button("Save artwork", elem_id="share-btn-img2img")
178
+
179
+ image2image_predict_button.click(
180
+ fn=StableDiffusionImage2ImageGenerator().generate_image,
181
+ inputs=[
182
+ image2image_image_file,
183
+ image2image_model_path,
184
+ image2image_prompt,
185
+ image2image_negative_prompt,
186
+ image2image_num_images_per_prompt,
187
+ image2image_scheduler,
188
+ image2image_guidance_scale,
189
+ image2image_num_inference_step,
190
+ image2image_seed_generator,
191
+ ],
192
+ outputs=[output_image],
193
+ )
194
+
195
+ image2image_generate_prompt_button.click(
196
+ fn=generate,
197
+ inputs=[image2image_prompt],
198
+ outputs=[image2image_generated_prompt],
199
+ )
200
+
201
+
202
+ return demo
203
+
204
+
blocks/inpainting.py ADDED
@@ -0,0 +1,219 @@
1
+ import gradio as gr
2
+ from diffusers import DiffusionPipeline,StableDiffusionInpaintPipeline
3
+ import torch
4
+ from .utils.prompt2prompt import generate
5
+ from .utils.device import get_device
6
+ from .utils.schedulers import SCHEDULER_LIST, get_scheduler_list
7
+ from .download import get_share_js, CSS, get_community_loading_icon
8
+
9
+ INPAINT_MODEL_LIST = {
10
+ "Stable Diffusion 2" : "stabilityai/stable-diffusion-2-inpainting",
11
+ "Stable Diffusion 1" : "runwayml/stable-diffusion-inpainting",
12
+ }
13
+
14
+ class StableDiffusionInpaintGenerator:
15
+ def __init__(self):
16
+ self.pipe = None
17
+
18
+ def load_model(self, model_path, scheduler):
19
+ model_path = INPAINT_MODEL_LIST[model_path]
20
+ if self.pipe is None:
21
+ self.pipe = StableDiffusionInpaintPipeline.from_pretrained(
22
+ model_path, torch_dtype=torch.float32
23
+ )
24
+ device = get_device()
25
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
26
+ self.pipe.to(device)
27
+ #self.pipe.enable_attention_slicing()
28
+ return self.pipe
29
+
30
+ def generate_image(
31
+ self,
32
+ pil_image: str,
33
+ model_path: str,
34
+ prompt: str,
35
+ negative_prompt: str,
36
+ num_images_per_prompt: int,
37
+ scheduler: str,
38
+ guidance_scale: int,
39
+ num_inference_step: int,
40
+ height: int,
41
+ width: int,
42
+ seed_generator=0,
43
+ ):
44
+
45
+ image = pil_image["image"].convert("RGB").resize((width, height))
46
+ mask_image = pil_image["mask"].convert("RGB").resize((width, height))
47
+
48
+ pipe = self.load_model(model_path,scheduler)
49
+
50
+ if seed_generator == 0:
51
+ random_seed = torch.randint(0, 1000000, (1,))
52
+ generator = torch.manual_seed(random_seed)
53
+ else:
54
+ generator = torch.manual_seed(seed_generator)
55
+
56
+ output = pipe(
57
+ prompt=prompt,
58
+ image=image,
59
+ mask_image=mask_image,
60
+ negative_prompt=negative_prompt,
61
+ num_images_per_prompt=num_images_per_prompt,
62
+ num_inference_steps=num_inference_step,
63
+ guidance_scale=guidance_scale,
64
+ generator=generator,
65
+ ).images
66
+
67
+ return output
68
+
69
+
70
+ def app():
71
+ demo = gr.Blocks(css=CSS)
72
+ with demo:
73
+ with gr.Row():
74
+ with gr.Column():
75
+ stable_diffusion_inpaint_image_file = gr.Image(
76
+ source="upload",
77
+ tool="sketch",
78
+ elem_id="image-upload-inpainting",
79
+ type="pil",
80
+ label="Upload",
81
+
82
+ ).style(height=260)
83
+
84
+ stable_diffusion_inpaint_prompt = gr.Textbox(
85
+ lines=1,
86
+ placeholder="Prompt",
87
+ show_label=False,
88
+ elem_id="prompt-text-input-inpainting",
89
+ value=''
90
+ )
91
+
92
+ stable_diffusion_inpaint_negative_prompt = gr.Textbox(
93
+ lines=1,
94
+ placeholder="Negative Prompt",
95
+ show_label=False,
96
+ elem_id = "negative-prompt-text-input-inpainting",
97
+ value=''
98
+ )
99
+ # add button for generating a prompt from the prompt
100
+ stable_diffusion_inpaint_generate = gr.Button(
101
+ label="Generate Prompt",
102
+ type="primary",
103
+ align="center",
104
+ value = "Generate Prompt"
105
+ )
106
+
107
+ # show a text box with the generated prompt
108
+ stable_diffusion_inpaint_generated_prompt = gr.Textbox(
109
+ lines=1,
110
+ placeholder="Generated Prompt",
111
+ show_label=False,
112
+ )
113
+
114
+ stable_diffusion_inpaint_model_id = gr.Dropdown(
115
+ choices=list(INPAINT_MODEL_LIST.keys()),
116
+ value=list(INPAINT_MODEL_LIST.keys())[0],
117
+ label="Inpaint Model Selection",
118
+ elem_id="model-dropdown-inpainting",
119
+ )
120
+ with gr.Row():
121
+ with gr.Column():
122
+ stable_diffusion_inpaint_guidance_scale = gr.Slider(
123
+ minimum=0.1,
124
+ maximum=15,
125
+ step=0.1,
126
+ value=7.5,
127
+ label="Guidance Scale",
128
+ elem_id = "guidance-scale-slider-inpainting"
129
+ )
130
+
131
+ stable_diffusion_inpaint_num_inference_step = gr.Slider(
132
+ minimum=1,
133
+ maximum=100,
134
+ step=1,
135
+ value=50,
136
+ label="Num Inference Step",
137
+ elem_id = "num-inference-step-slider-inpainting"
138
+ )
139
+
140
+ stable_diffusion_inpaint_num_images_per_prompt = gr.Slider(
141
+ minimum=1,
142
+ maximum=10,
143
+ step=1,
144
+ value=1,
145
+ label="Number Of Images",
146
+ )
147
+
148
+ with gr.Row():
149
+ with gr.Column():
150
+ stable_diffusion_inpaint_scheduler = gr.Dropdown(
151
+ choices=SCHEDULER_LIST,
152
+ value=SCHEDULER_LIST[0],
153
+ label="Scheduler",
154
+ elem_id="scheduler-dropdown-inpainting",
155
+ )
156
+
157
+ stable_diffusion_inpaint_size = gr.Slider(
158
+ minimum=128,
159
+ maximum=1280,
160
+ step=32,
161
+ value=512,
162
+ label="Image Size",
163
+ elem_id="image-size-slider-inpainting",
164
+ )
165
+
166
+ stable_diffusion_inpaint_seed_generator = gr.Slider(
167
+ label="Seed(0 for random)",
168
+ minimum=0,
169
+ maximum=1000000,
170
+ value=0,
171
+ elem_id="seed-slider-inpainting",
172
+ )
173
+
174
+ stable_diffusion_inpaint_predict = gr.Button(
175
+ value="Generator"
176
+ )
177
+
178
+ with gr.Column():
179
+ output_image = gr.Gallery(
180
+ label="Generated images",
181
+ show_label=False,
182
+ elem_id="gallery-inpainting",
183
+ ).style(grid=(1, 2))
184
+
185
+ with gr.Group(elem_id="container-advanced-btns"):
186
+ with gr.Group(elem_id="share-btn-container"):
187
+ community_icon_html, loading_icon_html = get_community_loading_icon("inpainting")
188
+ community_icon = gr.HTML(community_icon_html)
189
+ loading_icon = gr.HTML(loading_icon_html)
190
+ share_button = gr.Button("Save artwork", elem_id="share-btn-inpainting")
191
+
192
+ stable_diffusion_inpaint_predict.click(
193
+ fn=StableDiffusionInpaintGenerator().generate_image,
194
+ inputs=[
195
+ stable_diffusion_inpaint_image_file,
196
+ stable_diffusion_inpaint_model_id,
197
+ stable_diffusion_inpaint_prompt,
198
+ stable_diffusion_inpaint_negative_prompt,
199
+ stable_diffusion_inpaint_num_images_per_prompt,
200
+ stable_diffusion_inpaint_scheduler,
201
+ stable_diffusion_inpaint_guidance_scale,
202
+ stable_diffusion_inpaint_num_inference_step,
203
+ stable_diffusion_inpaint_size,
204
+ stable_diffusion_inpaint_size,
205
+ stable_diffusion_inpaint_seed_generator,
206
+ ],
207
+ outputs=[output_image],
208
+ )
209
+
210
+ stable_diffusion_inpaint_generate.click(
211
+ fn=generate,
212
+ inputs=[stable_diffusion_inpaint_prompt],
213
+ outputs=[stable_diffusion_inpaint_generated_prompt],
214
+ )
215
+
216
+
217
+
218
+
219
+ return demo
blocks/main.py ADDED
@@ -0,0 +1,31 @@
+ import gradio as gr
+ from .download import CSS
+ from .inpainting import StableDiffusionInpaintGenerator
+ from .text2img import StableDiffusionText2ImageGenerator
+ from .img2img import StableDiffusionImage2ImageGenerator
+
+ def main_box(username: str = "admin"):
+     """
+     Implement the main interface for the app which will be served
+     to the frontend.
+     """
+     # customize the share_js button by letting username
+     app = gr.Blocks(css=CSS)
+     with app:
+         with gr.Row():
+             with gr.Column():
+                 with gr.Tab("Text-to-Image", id='text-to-image', elem_id='text-to-image-tab'):
+                     StableDiffusionText2ImageGenerator.app()
+                 with gr.Tab("Image-to-Image", id='image-to-image', elem_id='image-to-image-tab'):
+                     StableDiffusionImage2ImageGenerator.app()
+                 with gr.Tab("Inpainting", id='inpainting', elem_id='inpainting-tab'):
+                     StableDiffusionInpaintGenerator.app()
+
+         # Add a footer that will be displayed at the bottom of the app
+         gr.HTML("""
+             <div style="text-align: center; font-size: 12px; margin-top: 10px; color: #999;">Minerva : Only your imagination is the limit!</div>
+         """)
+
+     app.queue(concurrency_count=2)
+     return app
blocks/text2img.py ADDED
@@ -0,0 +1,235 @@
1
+ import gradio as gr
2
+ import torch
3
+ from diffusers import StableDiffusionPipeline
4
+ from .utils.schedulers import SCHEDULER_LIST, get_scheduler_list
5
+ from .utils.prompt2prompt import generate
6
+ from .utils.device import get_device
7
+ from .download import get_share_js, community_icon_html, loading_icon_html, CSS
8
+
9
+ #--- create a download button that takes the output image from gradio and downloads it
10
+
11
+ TEXT2IMG_MODEL_LIST = {
12
+ "OpenJourney v4" : "prompthero/openjourney-v4",
13
+ "StableDiffusion 1.5" : "runwayml/stable-diffusion-v1-5",
14
+ "StableDiffusion 2.1" : "stabilityai/stable-diffusion-2-1",
15
+ "DreamLike 1.0" : "dreamlike-art/dreamlike-diffusion-1.0",
16
+ "DreamLike 2.0" : "dreamlike-art/dreamlike-photoreal-2.0",
17
+ "DreamShaper" : "Lykon/DreamShaper",
18
+ "NeverEnding-Dream" : "Lykon/NeverEnding-Dream"
19
+ }
20
+
21
+ class StableDiffusionText2ImageGenerator:
22
+ def __init__(self):
23
+ self.pipe = None
24
+
25
+ def load_model(
26
+ self,
27
+ model_path,
28
+ scheduler
29
+ ):
30
+ model_path = TEXT2IMG_MODEL_LIST[model_path]
31
+ if self.pipe is None:
32
+ self.pipe = StableDiffusionPipeline.from_pretrained(
33
+ model_path, safety_checker=None, torch_dtype=torch.float32
34
+ )
35
+
36
+ device = get_device()
37
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
38
+ self.pipe.to(device)
39
+ #self.pipe.enable_attention_slicing()
40
+
41
+ return self.pipe
42
+
43
+ def generate_image(
44
+ self,
45
+ model_path: str,
46
+ prompt: str,
47
+ negative_prompt: str,
48
+ num_images_per_prompt: int,
49
+ scheduler: str,
50
+ guidance_scale: int,
51
+ num_inference_step: int,
52
+ height: int,
53
+ width: int,
54
+ seed_generator=0,
55
+ ):
56
+ print("model_path", model_path)
57
+ print("prompt", prompt)
58
+ print("negative_prompt", negative_prompt)
59
+ print("num_images_per_prompt", num_images_per_prompt)
60
+ print("scheduler", scheduler)
61
+ print("guidance_scale", guidance_scale)
62
+ print("num_inference_step", num_inference_step)
63
+ print("height", height)
64
+ print("width", width)
65
+ print("seed_generator", seed_generator)
66
+
67
+ pipe = self.load_model(
68
+ model_path=model_path,
69
+ scheduler=scheduler,
70
+ )
71
+ if seed_generator == 0:
72
+ random_seed = torch.randint(0, 1000000, (1,))
73
+ generator = torch.manual_seed(random_seed)
74
+ else:
75
+ generator = torch.manual_seed(seed_generator)
76
+
77
+ images = pipe(
78
+ prompt=prompt,
79
+ height=height,
80
+ width=width,
81
+ negative_prompt=negative_prompt,
82
+ num_images_per_prompt=num_images_per_prompt,
83
+ num_inference_steps=num_inference_step,
84
+ guidance_scale=guidance_scale,
85
+ generator=generator,
86
+ ).images
87
+
88
+ return images
89
+
90
+
91
+ def app(username : str = "admin"):
92
+ demo = gr.Blocks(css = CSS)
93
+ with demo:
94
+ with gr.Row():
95
+ with gr.Column():
96
+ text2image_prompt = gr.Textbox(
97
+ lines=1,
98
+ placeholder="Prompt",
99
+ show_label=False,
100
+ elem_id="prompt-text-input",
101
+ value=''
102
+ )
103
+
104
+ text2image_negative_prompt = gr.Textbox(
105
+ lines=1,
106
+ placeholder="Negative Prompt",
107
+ show_label=False,
108
+ elem_id = "negative-prompt-text-input",
109
+ value=''
110
+ )
111
+
112
+ # add button for generating a prompt from the prompt
113
+ text2image_prompt_generate_button = gr.Button(
114
+ label="Generate Prompt",
115
+ type="primary",
116
+ align="center",
117
+ value = "Generate Prompt"
118
+ )
119
+
120
+ # show a text box with the generated prompt
121
+ text2image_prompt_generated_prompt = gr.Textbox(
122
+ lines=1,
123
+ placeholder="Generated Prompt",
124
+ show_label=False,
125
+ )
126
+ with gr.Row():
127
+ with gr.Column():
128
+ text2image_model_path = gr.Dropdown(
129
+ choices=list(TEXT2IMG_MODEL_LIST.keys()),
130
+ value=list(TEXT2IMG_MODEL_LIST.keys())[0],
131
+ label="Text2Image Model Selection",
132
+ elem_id="model-dropdown",
133
+ )
134
+
135
+ text2image_guidance_scale = gr.Slider(
136
+ minimum=0.1,
137
+ maximum=15,
138
+ step=0.1,
139
+ value=7.5,
140
+ label="Guidance Scale",
141
+ elem_id = "guidance-scale-slider"
142
+ )
143
+
144
+ text2image_num_inference_step = gr.Slider(
145
+ minimum=1,
146
+ maximum=100,
147
+ step=1,
148
+ value=50,
149
+ label="Num Inference Step",
150
+ elem_id = "num-inference-step-slider"
151
+ )
152
+ text2image_num_images_per_prompt = gr.Slider(
153
+ minimum=1,
154
+ maximum=30,
155
+ step=1,
156
+ value=1,
157
+ label="Number Of Images",
158
+ )
159
+ with gr.Row():
160
+ with gr.Column():
161
+
162
+ text2image_scheduler = gr.Dropdown(
163
+ choices=SCHEDULER_LIST,
164
+ value=SCHEDULER_LIST[0],
165
+ label="Scheduler",
166
+ elem_id="scheduler-dropdown",
167
+ )
168
+
169
+ text2image_size = gr.Slider(
170
+ minimum=128,
171
+ maximum=1280,
172
+ step=32,
173
+ value=512,
174
+ label="Image Size",
175
+ elem_id="image-size-slider",
176
+ )
177
+
178
+ text2image_seed_generator = gr.Slider(
179
+ label="Seed(0 for random)",
180
+ minimum=0,
181
+ maximum=1000000,
182
+ value=0,
183
+ elem_id="seed-slider",
184
+ )
185
+ text2image_predict = gr.Button(value="Generator")
186
+
187
+ with gr.Column():
188
+ output_image = gr.Gallery(
189
+ label="Generated images",
190
+ show_label=False,
191
+ elem_id="gallery",
192
+ ).style(grid=(1, 2), height='auto')
193
+
194
+ with gr.Group(elem_id="container-advanced-btns"):
195
+ with gr.Group(elem_id="share-btn-container"):
196
+ community_icon = gr.HTML(community_icon_html)
197
+ loading_icon = gr.HTML(loading_icon_html)
198
+ share_button = gr.Button("Save artwork", elem_id="share-btn")
199
+
200
+
201
+ text2image_predict.click(
202
+ fn=StableDiffusionText2ImageGenerator().generate_image,
203
+ inputs=[
204
+ text2image_model_path,
205
+ text2image_prompt,
206
+ text2image_negative_prompt,
207
+ text2image_num_images_per_prompt,
208
+ text2image_scheduler,
209
+ text2image_guidance_scale,
210
+ text2image_num_inference_step,
211
+ text2image_size,
212
+ text2image_size,
213
+ text2image_seed_generator,
214
+ ],
215
+ outputs=output_image,
216
+ )
217
+
218
+ text2image_prompt_generate_button.click(
219
+ fn=generate,
220
+ inputs=[text2image_prompt],
221
+ outputs=[text2image_prompt_generated_prompt],
222
+ )
223
+
224
+ # share_button.click(
225
+ # None,
226
+ # [],
227
+ # [],
228
+ # _js=get_share_js(),
229
+ # )
230
+
231
+ # autoclick the share button
232
+
233
+
234
+
235
+ return demo
blocks/utils/__init__.py ADDED
File without changes
blocks/utils/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (171 Bytes). View file
 
blocks/utils/__pycache__/device.cpython-39.pyc ADDED
Binary file (443 Bytes). View file
 
blocks/utils/__pycache__/prompt2prompt.cpython-39.pyc ADDED
Binary file (1.03 kB). View file
 
blocks/utils/__pycache__/schedulers.cpython-39.pyc ADDED
Binary file (903 Bytes). View file
 
blocks/utils/device.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
+ def get_device(device = None):
5
+ if device is None:
6
+ # get cuda -> mps -> cpu
7
+ if torch.cuda.is_available():
8
+ device = "cuda"
9
+ elif torch.backends.mps.is_available():
10
+ if torch.backends.mps.is_built():
11
+ device = "mps"
12
+ else:
13
+ device = "cpu"
14
+ else:
15
+ device = "cpu"
16
+ return device
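A quick usage sketch for get_device: pick the best available backend and move a pipeline onto it. The checkpoint id is only an example:

    import torch
    from diffusers import StableDiffusionPipeline
    from blocks.utils.device import get_device

    device = get_device()  # prefers "cuda", then "mps", then "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=dtype)
    pipe = pipe.to(device)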
blocks/utils/prompt2prompt.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoTokenizer, AutoModelForCausalLM
2
+ from transformers import pipeline, set_seed
3
+ import re
4
+ import random
5
+ gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
6
+
7
+ def generate(starting_text):
8
+ seed = random.randint(100, 1000000)
9
+ set_seed(seed)
10
+
11
+ response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
12
+ response_list = []
13
+ for x in response:
14
+ resp = x['generated_text'].strip()
15
+ if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
16
+ response_list.append(resp+'\n')
17
+
18
+ response_end = "\n".join(response_list)
19
+ response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)  # raw string avoids an invalid-escape warning
20
+ response_end = response_end.replace("<", "").replace(">", "")
21
+
22
+ if response_end != "":
23
+ return response_end
+ # fall back to the original text when every generated candidate was filtered out
+ return starting_text
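Note that the MagicPrompt pipeline above is constructed at module level, so the first import of blocks.utils.prompt2prompt triggers the model download. A short usage sketch:

    from blocks.utils.prompt2prompt import generate

    expanded = generate("a portrait of a cyberpunk samurai")
    print(expanded)  # newline-separated prompt suggestions, or the original text if all candidates were filtered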
blocks/utils/schedulers.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from diffusers import (
2
+ DDIMScheduler,
3
+ EulerAncestralDiscreteScheduler,
4
+ EulerDiscreteScheduler,
5
+ HeunDiscreteScheduler,
6
+ LMSDiscreteScheduler,
7
+ DPMSolverMultistepScheduler
8
+ )
9
+
10
+ SCHEDULER_LIST = [
11
+ "DDIM",
12
+ "EulerA",
13
+ "Euler",
14
+ "LMS",
15
+ "Heun",
16
+ "DPMMultistep",
17
+ ]
18
+
19
+
20
+ def get_scheduler_list(pipe, scheduler):
21
+ if scheduler == SCHEDULER_LIST[0]:
22
+ pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
23
+
24
+ elif scheduler == SCHEDULER_LIST[1]:
25
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
26
+ pipe.scheduler.config
27
+ )
28
+
29
+ elif scheduler == SCHEDULER_LIST[2]:
30
+ pipe.scheduler = EulerDiscreteScheduler.from_config(
31
+ pipe.scheduler.config
32
+ )
33
+
34
+ elif scheduler == SCHEDULER_LIST[3]:
35
+ pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
36
+
37
+ elif scheduler == SCHEDULER_LIST[4]:
38
+ pipe.scheduler = HeunDiscreteScheduler.from_config(
39
+ pipe.scheduler.config
40
+ )
41
+
42
+ elif scheduler == SCHEDULER_LIST[5]:
43
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(
44
+ pipe.scheduler.config
45
+ )
46
+
47
+ return pipe
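Usage sketch for the scheduler helper: load any Stable Diffusion pipeline and swap its scheduler by name (the checkpoint id is only an example). Note that a name outside SCHEDULER_LIST currently leaves the pipeline's scheduler unchanged:

    from diffusers import StableDiffusionPipeline
    from blocks.utils.schedulers import get_scheduler_list, SCHEDULER_LIST

    pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = get_scheduler_list(pipe, "DPMMultistep")  # any entry of SCHEDULER_LIST
    print(type(pipe.scheduler).__name__)             # DPMSolverMultistepScheduler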
diffmodels/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .diffusion_utils import build_pipeline
2
+
3
+ NAME_TO_MODEL = {
4
+ "stable-diffusion-v1-4":
5
+ {
6
+ "model" : "CompVis/stable-diffusion-v1-4",
7
+ "unet" : "CompVis/stable-diffusion-v1-4",
8
+ "tokenizer" : "openai/clip-vit-large-patch14",
9
+ "text_encoder" : "openai/clip-vit-large-patch14",
10
+ },
11
+ "stable_diffusion_v2_1":
12
+ {
13
+ "model" : "stabilityai/stable-diffusion-2-1",
14
+ "unet" : "stabilityai/stable-diffusion-2-1",
15
+ "tokenizer" : "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
16
+ "text_encoder" : "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
17
+ }
18
+ }
19
+
20
+ def get_model(model_name):
21
+ model = NAME_TO_MODEL.get(model_name)
22
+ if model is None:
23
+ raise ValueError(f"Model name {model_name} not found. Available models: {list(NAME_TO_MODEL.keys())}")
24
+ vae, tokenizer, text_encoder, unet = build_pipeline(model["model"], model["tokenizer"], model["text_encoder"], model["unet"])
25
+ return vae, tokenizer, text_encoder, unet
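A short usage sketch for get_model. Keep in mind that build_pipeline moves the components to CUDA by default, so this assumes a GPU machine:

    from diffmodels import get_model

    vae, tokenizer, text_encoder, unet = get_model("stable-diffusion-v1-4")
    print(unet.config.sample_size)  # latent resolution the UNet was trained for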
diffmodels/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (853 Bytes). View file
 
diffmodels/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (852 Bytes). View file
 
diffmodels/__pycache__/diffusion_utils.cpython-310.pyc ADDED
Binary file (5.26 kB). View file
 
diffmodels/__pycache__/diffusion_utils.cpython-39.pyc ADDED
Binary file (5.2 kB). View file
 
diffmodels/__pycache__/simple_diffusion.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
diffmodels/diffusion_utils.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Utility class for loading and using diffusers model
2
+ import diffusers
3
+ import transformers
4
+
5
+ import torch
6
+ from typing import Union
7
+ import os
8
+ import warnings
9
+ import numpy as np
10
+ from PIL import Image
11
+ import tqdm
12
+ from copy import deepcopy
13
+ import matplotlib.pyplot as plt
14
+
15
+ def build_generator(
16
+ device : torch.device,
17
+ seed : int,
18
+ ):
19
+ """
20
+ Build a torch.Generator with a given seed.
21
+ """
22
+ generator = torch.Generator(device).manual_seed(seed)
23
+ return generator
24
+
25
+ def load_stablediffusion_model(
26
+ model_id : Union[str, os.PathLike],
27
+ device : torch.device,
28
+ ):
29
+ """
30
+ Load a complete diffusion model from a model id.
31
+ Returns a tuple of the model and a torch.Generator if seed is not None.
32
+
33
+ """
34
+ pipe = diffusers.DiffusionPipeline.from_pretrained(
35
+ model_id,
36
+ revision="fp16",
37
+ torch_dtype=torch.float16,
38
+ use_auth_token=True,
39
+ )
40
+ pipe.scheduler = diffusers.DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
41
+ try:
42
+ pipe = pipe.to(device)
43
+ except Exception:
44
+ warnings.warn(
45
+ f'Could not load model to device:{device}. Using CPU instead.'
46
+ )
47
+ pipe = pipe.to('cpu')
48
+ device = 'cpu'
49
+
50
+ return pipe
51
+
52
+
53
+ def visualize_image_grid(
54
+ imgs : np.array,
55
+ rows : int,
56
+ cols : int):
57
+
58
+ assert len(imgs) == rows*cols
59
+
60
+ # create grid
61
+ w, h = imgs[0].size # assuming each image is the same size
62
+
63
+ grid = Image.new('RGB', size=(cols*w, rows*h))
64
+
65
+ for i,img in enumerate(imgs):
66
+ grid.paste(img, box=(i%cols*w, i//cols*h))
67
+ return grid
68
+
69
+
70
+ def build_pipeline(
71
+ autoencoder : Union[str, os.PathLike] = "CompVis/stable-diffusion-v1-4",
72
+ tokenizer : Union[str, os.PathLike] = "openai/clip-vit-large-patch14",
73
+ text_encoder : Union[str, os.PathLike] = "openai/clip-vit-large-patch14",
74
+ unet : Union[str, os.PathLike] = "CompVis/stable-diffusion-v1-4",
75
+ device : torch.device = torch.device('cuda'),
76
+ ):
77
+ """
78
+ Create a pipeline for StableDiffusion by loading the model components separately.
79
+ Arguments:
80
+ autoencoder: path to model that autoencoder will be loaded from
81
+ tokenizer: path to tokenizer
82
+ text_encoder: path to text_encoder
83
+ unet: path to unet
84
+ """
85
+ # Load the VAE for encoding images into the latent space
86
+ vae = diffusers.AutoencoderKL.from_pretrained(autoencoder, subfolder = 'vae')
87
+
88
+ # Load tokenizer & text encoder for encoding text into the latent space
89
+ tokenizer = transformers.CLIPTokenizer.from_pretrained(tokenizer)
90
+ text_encoder = transformers.CLIPTextModel.from_pretrained(text_encoder)
91
+
92
+ # Use the UNet model for conditioning the diffusion process
93
+ unet = diffusers.UNet2DConditionModel.from_pretrained(unet, subfolder = 'unet')
94
+
95
+ # Move all the components to device
96
+ vae = vae.to(device)
97
+ text_encoder = text_encoder.to(device)
98
+ unet = unet.to(device)
99
+
100
+ return vae, tokenizer, text_encoder, unet
101
+
102
+ #TODO : Add negative prompting
103
+ def custom_stablediffusion_inference(
104
+ vae,
105
+ tokenizer,
106
+ text_encoder,
107
+ unet,
108
+ noise_scheduler,
109
+ prompt : list,
110
+ device : torch.device,
111
+ num_inference_steps = 100,
112
+ image_size = (512,512),
113
+ guidance_scale = 8,
114
+ seed = 42,
115
+ return_image_step = 5,
116
+ ):
117
+ # Get the text embeddings that will condition the diffusion process
118
+ if isinstance(prompt,str):
119
+ prompt = [prompt]
120
+
121
+ batch_size = len(prompt)
122
+ text_input = tokenizer(
123
+ prompt,
124
+ padding = 'max_length',
125
+ truncation = True,
126
+ max_length = tokenizer.model_max_length,
127
+ return_tensors = 'pt').to(device)
128
+
129
+ text_embeddings = text_encoder(
130
+ text_input.input_ids.to(device)
131
+ )[0]
132
+
133
+ # Get the text embeddings for classifier-free guidance
134
+ max_length = text_input.input_ids.shape[-1]
135
+ empty = [""] * batch_size
136
+ uncond_input = tokenizer(
137
+ empty,
138
+ padding = 'max_length',
139
+ truncation = True,
140
+ max_length = max_length,
141
+ return_tensors = 'pt').to(device)
142
+
143
+ uncond_embeddings = text_encoder(
144
+ uncond_input.input_ids.to(device)
145
+ )[0]
146
+
147
+ # Concatenate the text embeddings to get the conditioning vector
148
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
149
+
150
+ # Generate initial noise
151
+ latents = torch.randn(
152
+ (batch_size, unet.in_channels, image_size[0] // 8, image_size[1] // 8),
153
+ generator=torch.manual_seed(seed) if seed is not None else None
154
+ )
155
+ print(latents.shape)
156
+
157
+ latents = latents.to(device)
158
+
159
+ # Initialize scheduler for noise generation
160
+ noise_scheduler.set_timesteps(num_inference_steps)
161
+
162
+ latents = latents * noise_scheduler.init_noise_sigma
163
+
164
+
165
+ for i,t in tqdm.tqdm(enumerate(noise_scheduler.timesteps)):
166
+ # Duplicate the latents so one copy is denoised unconditionally and one with the text condition (classifier-free guidance)
167
+ latent_model_input = torch.cat([latents] * 2)
168
+
169
+ latent_model_input = noise_scheduler.scale_model_input(latent_model_input, timestep=t)
170
+
171
+ with torch.no_grad():
172
+ # Get the noise prediction from the UNet
173
+ noise_pred = unet(latent_model_input, t, encoder_hidden_states = text_embeddings).sample
174
+
175
+ # Perform guidance from the text embeddings
176
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
177
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
178
+
179
+ # Compute the previously noisy sample x_t -> x_t-1
180
+ latents = noise_scheduler.step(noise_pred, t, latents).prev_sample
181
+
182
+ # Periodically decode the current latents with the VAE decoder to visualise progress
183
+ if i % return_image_step == 0:
184
+ with torch.no_grad():
185
+ latents_copy = deepcopy(latents)
186
+ image = vae.decode(1/0.18215 * latents_copy).sample
187
+
188
+ image = (image / 2 + 0.5).clamp(0,1)
189
+ image = image.detach().cpu().permute(0,2,3,1).numpy() # bxhxwxc
190
+ images = (image * 255).round().astype("uint8")
191
+
192
+ pil_images = [Image.fromarray(img) for img in images]
193
+
194
+ yield pil_images[0]
195
+
196
+ yield pil_images[0]
197
+
198
+ if __name__ == "__main__":
199
+ device = torch.device("cpu")
200
+ model_id = "stabilityai/stable-diffusion-2-1"
201
+ tokenizer_id = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
202
+ #noise_scheduler = diffusers.LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
203
+ noise_scheduler = diffusers.DPMSolverMultistepScheduler.from_pretrained(model_id,subfolder="scheduler")
204
+ prompt = "A Hyperrealistic photograph of Italian architectural modern home in Italy, lens flares,\
205
+ cinematic, hdri, matte painting, concept art, celestial, soft render, highly detailed, octane\
206
+ render, architectural HD, HQ, 4k, 8k"
207
+
208
+ vae, tokenizer, text_encoder, unet = build_pipeline(
209
+ autoencoder = model_id,
210
+ tokenizer=tokenizer_id,
211
+ text_encoder=tokenizer_id,
212
+ unet=model_id,
213
+ device=device,
214
+ )
215
+ image_iter = custom_stablediffusion_inference(vae, tokenizer, text_encoder, unet, noise_scheduler, prompt = prompt, device=device, seed = None)
216
+ for i, image in enumerate(image_iter):
217
+ image.save(f"step_{i}.png")
218
+
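Beyond the __main__ demo above (which exercises the low-level component path), the high-level loader can be used directly. A hedged sketch, assuming a CUDA machine and a Hugging Face token, since load_stablediffusion_model requests the fp16 revision with use_auth_token=True:

    import torch
    from diffmodels.diffusion_utils import build_generator, load_stablediffusion_model, visualize_image_grid

    device = torch.device("cuda")  # fp16 weights, so CPU execution is not practical here
    pipe = load_stablediffusion_model("runwayml/stable-diffusion-v1-5", device)
    generator = build_generator(device, seed=42)

    out = pipe(
        "a watercolor painting of a lighthouse at dawn",
        num_images_per_prompt=4,
        generator=generator,
    )
    grid = visualize_image_grid(out.images, rows=2, cols=2)
    grid.save("grid.png")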
diffmodels/simple_diffusion.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import diffusers
2
+ import transformers
3
+ import utils.log
4
+ import torch
5
+ import PIL
6
+ from typing import Union, Dict, Any, Optional, List, Tuple, Callable
7
+ import os
8
+ import re
9
+
10
+ class SimpleDiffusion(diffusers.DiffusionPipeline):
11
+ """
12
+ A unified interface for diffusion models. It allows us to use:
13
+ - txt2img
14
+ - img2img
15
+ - inpainting
16
+ - unconditional image generation
17
+
18
+ This class is heavily inspired by the Stable-Diffusion-Mega community pipeline.
19
+ The DiffusionPipeline base class lets us load/download any model hosted on the HuggingFace Hub with ease. Read more
20
+ about the DiffusionPipeline class here: https://huggingface.co/docs/diffusers/api/pipelines/overview
21
+
22
+ Args:
23
+ logger (:obj:`utils.log.Logger`):
24
+ The logger to use for logging any information.
25
+ vae ([`AutoencoderKL`]):
26
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
27
+ text_encoder ([`CLIPTextModel`]):
28
+ Frozen text-encoder. Stable Diffusion uses the text portion of
29
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
30
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
31
+ tokenizer (`CLIPTokenizer`):
32
+ Tokenizer of class
33
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
34
+ unet ([`UNet2DConditionModel`]):
35
+ Conditional U-Net architecture to denoise the encoded image latents.
36
+ scheduler ([`SchedulerMixin`]):
37
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
38
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
39
+ safety_checker ([`StableDiffusionMegaSafetyChecker`]):
40
+ Classification module that estimates whether generated images could be considered offensive or harmful.
41
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
42
+ feature_extractor ([`CLIPFeatureExtractor`]):
43
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
44
+
45
+ """
46
+ def __init__(
47
+ self,
48
+ vae: diffusers.AutoencoderKL,
49
+ text_encoder: transformers.CLIPTextModel,
50
+ tokenizer: transformers.CLIPTokenizer,
51
+ unet: diffusers.UNet2DConditionModel,
52
+ scheduler: Union[diffusers.DDIMScheduler, diffusers.PNDMScheduler, diffusers.LMSDiscreteScheduler],
53
+ safety_checker: diffusers.pipelines.stable_diffusion.safety_checker.StableDiffusionSafetyChecker,
54
+ feature_extractor: transformers.CLIPFeatureExtractor,
55
+ prompt_generation = "succinctly/text2image-prompt-generator"
56
+ ):
57
+ super().__init__()
58
+ self._logger = None
59
+ self.register_modules( # already defined in ConfigMixin class, from_pretrained loads these modules
60
+ vae=vae,
61
+ text_encoder=text_encoder,
62
+ tokenizer=tokenizer,
63
+ unet=unet,
64
+ scheduler=scheduler,
65
+ safety_checker=safety_checker,
66
+ feature_extractor=feature_extractor,
67
+
68
+ )
69
+ self._generated_prompts = []
70
+ self._enable_prompt_generation = False
71
+ if prompt_generation:
72
+ self._enable_prompt_generation = True
73
+ self._prompt_generator = transformers.pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
74
+
75
+ def _generate_prompt(self, prompt, **kwargs):
76
+ """
77
+ Generate a prompt from a given text.
78
+ Args:
79
+ prompt (str): The text to generate a prompt from.
80
+ **kwargs: Additional keyword arguments passed to the prompt generator pipeline.
81
+ """
82
+ max_length = kwargs.pop("max_length", None)
83
+ num_return_sequences = kwargs.pop("num_return_sequences", None)
84
+
85
+ generated = self._prompt_generator(prompt, max_length=max_length, num_return_sequences=num_return_sequences)
86
+ # _process_prompt expects the original prompt plus the list of generated candidates
+ processed = self._process_prompt(prompt, generated)
87
+ # keep the first surviving candidate; fall back to the user's prompt if none survive the filtering
+ return processed.splitlines()[0] if processed else prompt
88
+
89
+ def _process_prompt(self,original_prompt, prompt_list):
90
+ # TODO : Add documentation; add more prompt processing
91
+ response_list = []
92
+ for x in prompt_list:
93
+ resp = x['generated_text'].strip()
94
+ if resp != original_prompt and len(resp) > (len(original_prompt) + 4) and resp.endswith((":", "-", "—")) is False:
95
+ response_list.append(resp+'\n')
96
+
97
+ response_end = "\n".join(response_list)
98
+ response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)  # raw string avoids an invalid-escape warning
99
+ response_end = response_end.replace("<", "").replace(">", "")
100
+
101
+ if response_end != "":
102
+ return response_end
+ # fall back to the original prompt if every generated candidate was filtered out
+ return original_prompt
103
+
104
+ # Following components are required for the DiffusionPipeline class - but they exist in the StableDiffusionModel class
105
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
106
+ r"""
107
+ Enable sliced attention computation.
108
+ Refer to the [StableDiffusionModel](https://github.com/huggingface/diffusers/blob/main/examples/community/stable_diffusion_mega.py) repo
109
+ for more information.
110
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
111
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
112
+ Args:
113
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
114
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
115
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
116
+ `attention_head_dim` must be a multiple of `slice_size`.
117
+ """
118
+ if slice_size == "auto":
119
+ # half the attention head size is usually a good trade-off between
120
+ # speed and memory
121
+ if self._logger is not None:
122
+ self._logger.info("Attention slicing enabled!")
123
+ slice_size = self.unet.config.attention_head_dim // 2
124
+ self.unet.set_attention_slice(slice_size)
125
+
126
+ def disable_attention_slicing(self):
127
+ r"""
128
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
129
+ back to computing attention in one step.
130
+ """
131
+ if self._logger is not None:
132
+ self._logger.info("Attention slicing disabled!")
133
+ self.enable_attention_slicing(None)
134
+
135
+ def set_logger(self, logger):
136
+ r"""
137
+ Set logger. This is useful to log information about the model.
138
+ """
139
+ self._logger = logger
140
+
141
+ @property
142
+ def components(self) -> Dict[str, Any]:
143
+ # Return the non-private variables
144
+ return {k : getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
145
+
146
+ @torch.no_grad()
147
+ def inpaint(
148
+ self,
149
+ prompt: Union[str, List[str]],
150
+ init_image: Union[torch.FloatTensor, PIL.Image.Image],
151
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
152
+ strength: float = 0.8,
153
+ num_inference_steps: Optional[int] = 50,
154
+ guidance_scale: Optional[float] = 7.5,
155
+ negative_prompt: Optional[Union[str, List[str]]] = None,
156
+ num_images_per_prompt: Optional[int] = 1,
157
+ eta: Optional[float] = 0.0,
158
+ generator: Optional[torch.Generator] = None,
159
+ output_type: Optional[str] = "pil",
160
+ return_dict: bool = True,
161
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
162
+ callback_steps: Optional[int] = 1,
163
+ **kwargs,
164
+ ):
165
+ if self._enable_prompt_generation:
166
+ prompt = self._generate_prompt(prompt, **kwargs)
167
+ self._logger.info(f"Generated prompt: {prompt}")
168
+ # This method wraps StableDiffusionInpaintPipelineLegacy; see the Stable Diffusion pipeline docs: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion
169
+ return diffusers.StableDiffusionInpaintPipelineLegacy(**self.components)(
170
+ prompt=prompt,
171
+ init_image=init_image,
172
+ mask_image=mask_image,
173
+ strength=strength,
174
+ num_inference_steps=num_inference_steps,
175
+ guidance_scale=guidance_scale,
176
+ negative_prompt=negative_prompt,
177
+ num_images_per_prompt=num_images_per_prompt,
178
+ eta=eta,
179
+ generator=generator,
180
+ output_type=output_type,
181
+ return_dict=return_dict,
182
+ callback=callback,
183
+ )
184
+
185
+ @torch.no_grad()
186
+ def img2img(
187
+ self,
188
+ prompt: Union[str, List[str]],
189
+ init_image: Union[torch.FloatTensor, PIL.Image.Image],
190
+ strength: float = 0.8,
191
+ num_inference_steps: Optional[int] = 50,
192
+ guidance_scale: Optional[float] = 7.5,
193
+ negative_prompt: Optional[Union[str, List[str]]] = None,
194
+ num_images_per_prompt: Optional[int] = 1,
195
+ eta: Optional[float] = 0.0,
196
+ generator: Optional[torch.Generator] = None,
197
+ output_type: Optional[str] = "pil",
198
+ return_dict: bool = True,
199
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
200
+ callback_steps: Optional[int] = 1,
201
+ **kwargs,
202
+ ):
203
+ if self._enable_prompt_generation:
204
+ prompt = self._generate_prompt(prompt, **kwargs)
205
+ self._logger.info(f"Generated prompt: {prompt}")
206
+ # For more information on how this function works, please see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionImg2ImgPipeline
207
+ return diffusers.StableDiffusionImg2ImgPipeline(**self.components)(
208
+ prompt=prompt,
209
+ init_image=init_image,
210
+ strength=strength,
211
+ num_inference_steps=num_inference_steps,
212
+ guidance_scale=guidance_scale,
213
+ negative_prompt=negative_prompt,
214
+ num_images_per_prompt=num_images_per_prompt,
215
+ eta=eta,
216
+ generator=generator,
217
+ output_type=output_type,
218
+ return_dict=return_dict,
219
+ callback=callback,
220
+ callback_steps=callback_steps,
221
+ )
222
+
223
+ @torch.no_grad()
224
+ def text2img(
225
+ self,
226
+ prompt: Union[str, List[str]],
227
+ height: int = 512,
228
+ width: int = 512,
229
+ num_inference_steps: int = 50,
230
+ guidance_scale: float = 7.5,
231
+ negative_prompt: Optional[Union[str, List[str]]] = None,
232
+ num_images_per_prompt: Optional[int] = 1,
233
+ eta: float = 0.0,
234
+ generator: Optional[torch.Generator] = None,
235
+ latents: Optional[torch.FloatTensor] = None,
236
+ output_type: Optional[str] = "pil",
237
+ return_dict: bool = True,
238
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
239
+ callback_steps: Optional[int] = 1,
240
+ ):
241
+ if self._enable_prompt_generation:
242
+ prompt = self._generate_prompt(prompt)
243
+ self._logger.info(f"Generated prompt: {prompt}")
244
+
245
+ # For more information on how this function works, see: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion#diffusers.StableDiffusionPipeline
246
+ return diffusers.StableDiffusionPipeline(**self.components)(
247
+ prompt=prompt,
248
+ height=height,
249
+ width=width,
250
+ num_inference_steps=num_inference_steps,
251
+ guidance_scale=guidance_scale,
252
+ negative_prompt=negative_prompt,
253
+ num_images_per_prompt=num_images_per_prompt,
254
+ eta=eta,
255
+ generator=generator,
256
+ latents=latents,
257
+ output_type=output_type,
258
+ return_dict=return_dict,
259
+ callback=callback,
260
+ callback_steps=callback_steps,
261
+ )
262
+
263
+ @torch.no_grad()
264
+ def upscale(
265
+ self,
266
+ prompt: Union[str, List[str]],
267
+ init_image: Union[torch.FloatTensor, PIL.Image.Image],
268
+ num_inference_steps: Optional[int] = 75,
269
+ guidance_scale: Optional[float] = 9.0,
270
+ negative_prompt: Optional[Union[str, List[str]]] = None,
271
+ num_images_per_prompt: Optional[int] = 1,
272
+ eta: Optional[float] = 0.0,
273
+ generator: Optional[torch.Generator] = None,
274
+ latents: Optional[torch.FloatTensor] = None,
275
+ output_type: Optional[str] = "pil",
276
+ return_dict: bool = True,
277
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
278
+ callback_steps: Optional[int] = 1,
279
+ ):
280
+ """
281
+ Upscale an image using the StableDiffusionUpscalePipeline.
282
+ """
283
+ if self._enable_prompt_generation:
284
+ prompt = self._generate_prompt(prompt)
285
+ self._logger.info(f"Generated prompt: {prompt}")
286
+
287
+ return diffusers.StableDiffusionUpscalePipeline(**self.components)(
288
+ prompt=prompt,
289
+ image=init_image,
290
+ num_inference_steps=num_inference_steps,
291
+ guidance_scale=guidance_scale,
292
+ negative_prompt=negative_prompt,
293
+ num_images_per_prompt=num_images_per_prompt,
294
+ eta=eta,
295
+ generator=generator,
296
+ latents=latents,
297
+ output_type = output_type,
298
+ return_dict=return_dict,
299
+ callback=callback,
300
+ callback_steps=callback_steps)
301
+
302
+ def set_scheduler(self, scheduler: Union[diffusers.DDIMScheduler, diffusers.PNDMScheduler, diffusers.LMSDiscreteScheduler, diffusers.EulerDiscreteScheduler]):
303
+ """
304
+ Set the scheduler for the pipeline. This is useful for controlling the diffusion process.
305
+ Args:
306
+ scheduler (Union[diffusers.DDIMScheduler, diffusers.PNDMScheduler, diffusers.LMSDiscreteScheduler]): The scheduler to use.
307
+
308
+ """
309
+ self.register_modules(scheduler=scheduler)  # assigning into the components property has no effect, so re-register the module instead
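Because SimpleDiffusion registers the same module names as the standard Stable Diffusion pipelines, it should be loadable straight from a compatible checkpoint. A minimal, hedged sketch (checkpoint id and output handling are assumptions, not taken from this diff):

    from diffmodels.simple_diffusion import SimpleDiffusion

    pipe = SimpleDiffusion.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = pipe.to("cuda")
    pipe.enable_attention_slicing()

    result = pipe.text2img(
        prompt="an isometric voxel render of a tiny island village",
        num_inference_steps=30,
        guidance_scale=7.5,
    )
    result.images[0].save("island.png")

Each of the text2img/img2img/inpaint/upscale methods re-instantiates the corresponding diffusers pipeline from self.components on every call, which keeps the class small at the cost of a little per-call overhead.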
diffmodels/textual_inversion.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #@title Import required libraries
2
+ import argparse
3
+ import itertools
4
+ import math
5
+ import os
6
+ import random
7
+
8
+ import numpy as np
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+ from torch.utils.data import Dataset
13
+
14
+ import PIL
15
+ from accelerate import Accelerator
16
+ from accelerate.logging import get_logger
17
+ from accelerate.utils import set_seed
18
+ from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel
19
+ from diffusers.optimization import get_scheduler
20
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
21
+ from PIL import Image
22
+ from torchvision import transforms
23
+ from tqdm.auto import tqdm
24
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
25
+
26
+ pretrained_model_name_or_path = "stabilityai/stable-diffusion-2" #@param ["stabilityai/stable-diffusion-2", "stabilityai/stable-diffusion-2-base", "CompVis/stable-diffusion-v1-4", "runwayml/stable-diffusion-v1-5"] {allow-input: true}
27
+
28
+ # example image urls
29
+ urls = [
30
+ "https://huggingface.co/datasets/valhalla/images/resolve/main/2.jpeg",
31
+ "https://huggingface.co/datasets/valhalla/images/resolve/main/3.jpeg",
32
+ "https://huggingface.co/datasets/valhalla/images/resolve/main/5.jpeg",
33
+ "https://huggingface.co/datasets/valhalla/images/resolve/main/6.jpeg",
34
+ ]
35
+
36
+ # what is it that you are teaching? `object` enables you to teach the model a new object to be used, `style` allows you to teach the model a new style one can use.
37
+ what_to_teach = "object" #@param ["object", "style"]
38
+ # the token you are going to use to represent your new concept (so when you prompt the model, you will say "A `<my-placeholder-token>` in an amusement park"). We use angle brackets to differentiate a token from other words/tokens, to avoid collision.
39
+ placeholder_token = "<cat-toy>" #@param {type:"string"}
40
+ # is a word that can summarise what your new concept is, to be used as a starting point
41
+ initializer_token = "toy" #@param {type:"string"}
42
+
43
+ def image_grid(imgs, rows, cols):
44
+ assert len(imgs) == rows*cols
45
+
46
+ w, h = imgs[0].size
47
+ grid = Image.new('RGB', size=(cols*w, rows*h))
48
+ grid_w, grid_h = grid.size
49
+
50
+ for i, img in enumerate(imgs):
51
+ grid.paste(img, box=(i%cols*w, i//cols*h))
52
+ return grid
53
+
54
+ #@title Setup the prompt templates for training
55
+ imagenet_templates_small = [
56
+ "a photo of a {}",
57
+ "a rendering of a {}",
58
+ "a cropped photo of the {}",
59
+ "the photo of a {}",
60
+ "a photo of a clean {}",
61
+ "a photo of a dirty {}",
62
+ "a dark photo of the {}",
63
+ "a photo of my {}",
64
+ "a photo of the cool {}",
65
+ "a close-up photo of a {}",
66
+ "a bright photo of the {}",
67
+ "a cropped photo of a {}",
68
+ "a photo of the {}",
69
+ "a good photo of the {}",
70
+ "a photo of one {}",
71
+ "a close-up photo of the {}",
72
+ "a rendition of the {}",
73
+ "a photo of the clean {}",
74
+ "a rendition of a {}",
75
+ "a photo of a nice {}",
76
+ "a good photo of a {}",
77
+ "a photo of the nice {}",
78
+ "a photo of the small {}",
79
+ "a photo of the weird {}",
80
+ "a photo of the large {}",
81
+ "a photo of a cool {}",
82
+ "a photo of a small {}",
83
+ ]
84
+
85
+ imagenet_style_templates_small = [
86
+ "a painting in the style of {}",
87
+ "a rendering in the style of {}",
88
+ "a cropped painting in the style of {}",
89
+ "the painting in the style of {}",
90
+ "a clean painting in the style of {}",
91
+ "a dirty painting in the style of {}",
92
+ "a dark painting in the style of {}",
93
+ "a picture in the style of {}",
94
+ "a cool painting in the style of {}",
95
+ "a close-up painting in the style of {}",
96
+ "a bright painting in the style of {}",
97
+ "a cropped painting in the style of {}",
98
+ "a good painting in the style of {}",
99
+ "a close-up painting in the style of {}",
100
+ "a rendition in the style of {}",
101
+ "a nice painting in the style of {}",
102
+ "a small painting in the style of {}",
103
+ "a weird painting in the style of {}",
104
+ "a large painting in the style of {}",
105
+ ]
106
+
107
+ #@title Setup the dataset
108
+ class TextualInversionDataset(Dataset):
109
+ def __init__(
110
+ self,
111
+ data_root,
112
+ tokenizer,
113
+ learnable_property="object", # [object, style]
114
+ size=512,
115
+ repeats=100,
116
+ interpolation="bicubic",
117
+ flip_p=0.5,
118
+ set="train",
119
+ placeholder_token="*",
120
+ center_crop=False,
121
+ ):
122
+
123
+ self.data_root = data_root
124
+ self.tokenizer = tokenizer
125
+ self.learnable_property = learnable_property
126
+ self.size = size
127
+ self.placeholder_token = placeholder_token
128
+ self.center_crop = center_crop
129
+ self.flip_p = flip_p
130
+
131
+ self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
132
+
133
+ self.num_images = len(self.image_paths)
134
+ self._length = self.num_images
135
+
136
+ if set == "train":
137
+ self._length = self.num_images * repeats
138
+
139
+ self.interpolation = {
140
+ "linear": PIL.Image.LINEAR,
141
+ "bilinear": PIL.Image.BILINEAR,
142
+ "bicubic": PIL.Image.BICUBIC,
143
+ "lanczos": PIL.Image.LANCZOS,
144
+ }[interpolation]
145
+
146
+ self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
147
+ self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
148
+
149
+ def __len__(self):
150
+ return self._length
151
+
152
+ def __getitem__(self, i):
153
+ example = {}
154
+ image = Image.open(self.image_paths[i % self.num_images])
155
+
156
+ if not image.mode == "RGB":
157
+ image = image.convert("RGB")
158
+
159
+ placeholder_string = self.placeholder_token
160
+ text = random.choice(self.templates).format(placeholder_string)
161
+
162
+ example["input_ids"] = self.tokenizer(
163
+ text,
164
+ padding="max_length",
165
+ truncation=True,
166
+ max_length=self.tokenizer.model_max_length,
167
+ return_tensors="pt",
168
+ ).input_ids[0]
169
+
170
+ # default to score-sde preprocessing
171
+ img = np.array(image).astype(np.uint8)
172
+
173
+ if self.center_crop:
174
+ crop = min(img.shape[0], img.shape[1])
175
+ h, w, = (
176
+ img.shape[0],
177
+ img.shape[1],
178
+ )
179
+ img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
180
+
181
+ image = Image.fromarray(img)
182
+ image = image.resize((self.size, self.size), resample=self.interpolation)
183
+
184
+ image = self.flip_transform(image)
185
+ image = np.array(image).astype(np.uint8)
186
+ image = (image / 127.5 - 1.0).astype(np.float32)
187
+
188
+ example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
189
+ return example
190
+
191
+
192
+ #@title Load the tokenizer and add the placeholder token as a additional special token.
193
+ tokenizer = CLIPTokenizer.from_pretrained(
194
+ pretrained_model_name_or_path,
195
+ subfolder="tokenizer",
196
+ )
197
+
198
+ # Add the placeholder token in tokenizer
199
+ num_added_tokens = tokenizer.add_tokens(placeholder_token)
200
+ if num_added_tokens == 0:
201
+ raise ValueError(
202
+ f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
203
+ " `placeholder_token` that is not already in the tokenizer."
204
+ )
205
+
206
+
207
+
208
+ #@title Get token ids for our placeholder and initializer token. This code block will complain if initializer string is not a single token
209
+ # Convert the initializer_token, placeholder_token to ids
210
+ token_ids = tokenizer.encode(initializer_token, add_special_tokens=False)
211
+ # Check if initializer_token is a single token or a sequence of tokens
212
+ if len(token_ids) > 1:
213
+ raise ValueError("The initializer token must be a single token.")
214
+
215
+ initializer_token_id = token_ids[0]
216
+ placeholder_token_id = tokenizer.convert_tokens_to_ids(placeholder_token)
217
+
218
+
219
+ #@title Load the Stable Diffusion model
220
+ # Load models and create wrapper for stable diffusion
221
+ # pipeline = StableDiffusionPipeline.from_pretrained(pretrained_model_name_or_path)
222
+ # del pipeline
223
+ text_encoder = CLIPTextModel.from_pretrained(
224
+ pretrained_model_name_or_path, subfolder="text_encoder"
225
+ )
226
+ vae = AutoencoderKL.from_pretrained(
227
+ pretrained_model_name_or_path, subfolder="vae"
228
+ )
229
+ unet = UNet2DConditionModel.from_pretrained(
230
+ pretrained_model_name_or_path, subfolder="unet"
231
+ )
232
+
233
+ text_encoder.resize_token_embeddings(len(tokenizer))
234
+
235
+ token_embeds = text_encoder.get_input_embeddings().weight.data
236
+ token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
237
+
238
+ def freeze_params(params):
239
+ for param in params:
240
+ param.requires_grad = False
241
+
242
+ # Freeze vae and unet
243
+ freeze_params(vae.parameters())
244
+ freeze_params(unet.parameters())
245
+ # Freeze all parameters except for the token embeddings in text encoder
246
+ params_to_freeze = itertools.chain(
247
+ text_encoder.text_model.encoder.parameters(),
248
+ text_encoder.text_model.final_layer_norm.parameters(),
249
+ text_encoder.text_model.embeddings.position_embedding.parameters(),
250
+ )
251
+ freeze_params(params_to_freeze)
252
+
253
+ train_dataset = TextualInversionDataset(
254
+ data_root=save_path,  # save_path must point to a local folder of concept images; it is not defined in this snippet (see the note after this file)
255
+ tokenizer=tokenizer,
256
+ size=vae.sample_size,
257
+ placeholder_token=placeholder_token,
258
+ repeats=100,
259
+ learnable_property=what_to_teach, #Option selected above between object and style
260
+ center_crop=False,
261
+ set="train",
262
+ )
263
+
264
+ def create_dataloader(train_batch_size=1):
265
+ return torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)
266
+
267
+ noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_name_or_path, subfolder="scheduler")
268
+
269
+ # TODO: Add training scripts
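The snippet above references save_path without defining it. One way to materialize it, before TextualInversionDataset is built, is to download the example urls into a local folder; the folder name here is an arbitrary assumption:

    import os
    from io import BytesIO

    import requests
    from PIL import Image

    save_path = "./concept_images"  # assumed location for the concept images
    os.makedirs(save_path, exist_ok=True)
    for i, url in enumerate(urls):
        image = Image.open(BytesIO(requests.get(url).content)).convert("RGB")
        image.save(os.path.join(save_path, f"{i}.jpeg"))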
image_0.png ADDED
requirements.txt ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==0.19.0
2
+ aiofiles==23.1.0
3
+ aiohttp==3.8.4
4
+ aiosignal==1.3.1
5
+ altair==5.0.0
6
+ anyio==3.6.2
7
+ async-timeout==4.0.2
8
+ attrs==23.1.0
9
+ certifi==2023.5.7
10
+ charset-normalizer==3.1.0
11
+ click==8.1.3
12
+ contourpy==1.0.7
13
+ cycler==0.11.0
14
+ diffusers==0.16.1
15
+ fastapi==0.95.2
16
+ ffmpy==0.3.0
17
+ filelock==3.12.0
18
+ fonttools==4.39.4
19
+ frozenlist==1.3.3
20
+ fsspec==2023.5.0
21
+ gradio==3.30.0
22
+ gradio_client==0.2.4
23
+ h11==0.14.0
24
+ httpcore==0.17.0
25
+ httpx==0.24.0
26
+ huggingface-hub==0.14.1
27
+ idna==3.4
28
+ importlib-metadata==6.6.0
29
+ importlib-resources==5.12.0
30
+ Jinja2==3.1.2
31
+ jsonschema==4.17.3
32
+ kiwisolver==1.4.4
33
+ linkify-it-py==2.0.2
34
+ markdown-it-py==2.2.0
35
+ MarkupSafe==2.1.2
36
+ matplotlib==3.7.1
37
+ mdit-py-plugins==0.3.3
38
+ mdurl==0.1.2
39
+ mpmath==1.3.0
40
+ multidict==6.0.4
41
+ networkx==3.1
42
+ numpy==1.24.3
43
+ opencv-python==4.7.0.72
44
+ orjson==3.8.10
45
+ packaging==23.1
46
+ pandas==2.0.1
47
+ Pillow==9.5.0
48
+ pip==23.1.2
49
+ psutil==5.9.5
50
+ pydantic==1.10.7
51
+ pydub==0.25.1
52
+ Pygments==2.15.1
53
+ pyparsing==3.0.9
54
+ pyrsistent==0.19.3
55
+ python-dateutil==2.8.2
56
+ python-multipart==0.0.6
57
+ pytz==2023.3
58
+ PyYAML==6.0
59
+ regex==2023.5.5
60
+ requests==2.30.0
61
+ semantic-version==2.10.0
62
+ setuptools==67.7.2
63
+ six==1.16.0
64
+ sniffio==1.3.0
65
+ starlette==0.27.0
66
+ sympy==1.12
67
+ tokenizers==0.13.3
68
+ toolz==0.12.0
69
+ torch==2.0.1
70
+ tqdm==4.65.0
71
+ transformers==4.29.1
72
+ typing_extensions==4.5.0
73
+ tzdata==2023.3
74
+ uc-micro-py==1.0.2
75
+ urllib3==2.0.2
76
+ uvicorn==0.22.0
77
+ websockets==11.0.3
78
+ wheel==0.40.0
79
+ yarl==1.9.2
80
+ zipp==3.15.0
static/load_from_artwork.js ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ async () => {
2
+ const urlParams = new URLSearchParams(window.location.search);
3
+ const username = urlParams.get('username');
4
+ const artworkId = urlParams.get('artworkId');
5
+
6
+ const LOAD_URL = `http://127.0.0.1:5000/v1/api/load-parameters/${artworkId}`;
7
+ const response = await fetch(LOAD_URL, {
8
+ method: 'GET',
9
+ headers: {
10
+ 'X-Requested-With': 'XMLHttpRequest',
11
+ }
12
+ });
13
+
14
+ // Check if the response is okay
15
+ if (!response.ok) {
16
+ console.error("An error occurred while fetching the parameters.");
17
+ return;
18
+ }
19
+
20
+ const parameters = await response.json(); // Assuming you're getting a JSON response
21
+
22
+ // Get the necessary elements
23
+ const gradioEl = document.querySelector('gradio-app');
24
+ const promptInput = gradioEl.querySelector('#prompt-text-input textarea');
25
+ const negativePromptInput = gradioEl.querySelector('#negative-prompt-text-input textarea');
26
+
27
+ // Get the slider inputs
28
+ const guidanceScaleInput = gradioEl.querySelector('#guidance-scale-slider input');
29
+ const numInferenceStepInput = gradioEl.querySelector('#num-inference-step-slider input');
30
+ const imageSizeInput = gradioEl.querySelector('#image-size-slider input');
31
+ const seedInput = gradioEl.querySelector('#seed-slider input');
32
+
33
+ // Get the dropdown inputs
34
+ const modelDropdown = gradioEl.querySelector('#model-dropdown input');
35
+ const schedulerDropdown = gradioEl.querySelector('#scheduler-dropdown input');
36
+
37
+ // Set the values based on the parameters received
38
+ promptInput.value = parameters.text_prompt;
39
+ negativePromptInput.value = parameters.negative_prompt;
40
+ guidanceScaleInput.value = parameters.model_guidance_scale;
41
+ numInferenceStepInput.value = parameters.model_num_steps;
42
+ imageSizeInput.value = parameters.model_image_size;
43
+ seedInput.value = parameters.seed;
44
+ modelDropdown.value = parameters.model_name;
45
+ schedulerDropdown.value = parameters.scheduler_name;
46
+ }
static/save_artwork.js ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ async () => {
2
+ // Grab the Gradio app root and the generated gallery images
3
+ const gradioEl = document.querySelector('gradio-app');
4
+ const imgEls = gradioEl.querySelectorAll('#gallery img');
5
+
6
+ // Get the necessary fields
7
+ const promptTxt = gradioEl.querySelector('#prompt-text-input textarea').value;
8
+ const negativePromptTxt = gradioEl.querySelector('#negative-prompt-text-input textarea').value;
9
+
10
+ // Get values from the sliders
11
+ const modelGuidanceScale = parseFloat(gradioEl.querySelector('#guidance-scale-slider input').value);
12
+
13
+ const numSteps = parseInt(gradioEl.querySelector('#num-inference-step-slider input').value);
14
+ const imageSize = parseInt(gradioEl.querySelector('#image-size-slider input').value);
15
+ const seed = parseInt(gradioEl.querySelector('#seed-slider input').value);
16
+
17
+ // Get the values from dropdowns
18
+ const modelName = gradioEl.querySelector('#model-dropdown input').value;
19
+ const schedulerName = gradioEl.querySelector('#scheduler-dropdown input').value;
20
+
21
+ const shareBtnEl = gradioEl.querySelector('#share-btn');
22
+ const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
23
+ const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
24
+
25
+ if(!imgEls.length){
26
+ return;
27
+ };
28
+
29
+ shareBtnEl.style.pointerEvents = 'none';
30
+ shareIconEl.style.display = 'none';
31
+ loadingIconEl.style.removeProperty('display');
32
+ const files = await Promise.all(
33
+ [...imgEls].map(async (imgEl) => {
34
+ const res = await fetch(imgEl.src);
35
+ const blob = await res.blob();
36
+ const fileSrc = imgEl.src.split('/').pop(); // Get the file name from the img src path
37
+ const imgId = Date.now();
38
+ const fileName = `${fileSrc}-${imgId}.jpg`; // Fixed fileName construction
39
+ return new File([blob], fileName, { type: 'image/jpeg' });
40
+ })
41
+ );
42
+
43
+ // Ensure that only one image is uploaded by taking the first element if there are multiple
44
+ if (files.length > 1) {
45
+ files.splice(1, files.length - 1);
46
+ }
47
+
48
+ const urls = await Promise.all(files.map((f) => uploadFile(
49
+ f,
50
+ promptTxt,
51
+ negativePromptTxt,
52
+ modelName,
53
+ schedulerName,
54
+ modelGuidanceScale,
55
+ numSteps,
56
+ imageSize,
57
+ seed,
58
+ )));
59
+
60
+ shareBtnEl.style.removeProperty('pointer-events');
61
+ shareIconEl.style.removeProperty('display');
62
+ loadingIconEl.style.display = 'none';
63
+ }
utils/__init__.py ADDED
File without changes
utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (165 Bytes). View file
 
utils/__pycache__/device.cpython-310.pyc ADDED
Binary file (884 Bytes). View file
 
utils/__pycache__/image.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
utils/__pycache__/log.cpython-310.pyc ADDED
Binary file (627 Bytes). View file
 
utils/__pycache__/prompt2prompt.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
utils/device.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Union
2
+ import torch
3
+
4
+ def set_device(device : Union[str, torch.device]) -> torch.device:
5
+ """
6
+ Set the device to use for inference. Recommended to use GPU.
7
+ Arguments:
8
+ device Union[str, torch.device]
9
+ The device to use for inference. Can be either a string or a torch.device object.
10
+
11
+ Returns:
12
+ torch.device
13
+ The device to use for inference.
14
+ """
15
+ if isinstance(device, str):
16
+ if device == 'cuda':  # CUDA availability is handled by the fallback on the next line
17
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
18
+ elif device == 'mps' and torch.backends.mps.is_built():
19
+ device = torch.device('mps')
20
+ else:
21
+ device = torch.device(device)
22
+ return device
utils/image.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ from datetime import datetime, timezone
3
+ import uuid
4
+ import boto3
5
+ from config import AWSConfig
6
+
7
+ #Upload data to s3
8
+ def write_to_s3(image, fname, region_name='ap-south-1'):
9
+ """
10
+ Write an image to s3. Returns the url. Requires AWSConfig
11
+ # TODO : Add error handling
12
+ # TODO : Add logging
13
+ """
14
+ s3 = boto3.client('s3', region_name,aws_access_key_id=AWSConfig.aws_access_key_id, aws_secret_access_key=AWSConfig.aws_secret_access_key)
15
+ s3.upload_fileobj(image,AWSConfig.bucket_name,fname)
16
+ return f'https://{AWSConfig.bucket_name}.s3.{region_name}.amazonaws.com/{fname}'
17
+
18
+ def save_image(img):
19
+ """
20
+ Save an image to s3. Returns the url and filename for JSON output
21
+ # TODO : Add error handling
22
+ """
23
+ in_mem_file = io.BytesIO()
24
+ img.save(in_mem_file, format = 'png')
25
+ in_mem_file.seek(0)
26
+ dt = datetime.now()
27
+ file_name = str(uuid.uuid4())+'-'+str(int(dt.replace(tzinfo=timezone.utc).timestamp()))
28
+ img_url = write_to_s3(in_mem_file, f'sdimage/{file_name}.png')  # keep the extension consistent with the PNG bytes written above
29
+ return img_url,file_name
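A usage sketch for the S3 helper, assuming a config.AWSConfig with valid credentials and bucket name exists in the deployment (it is not part of this diff):

    from PIL import Image
    from utils.image import save_image

    img = Image.open("image_0.png")        # any PIL image
    img_url, file_name = save_image(img)   # uploads to S3 and returns the public URL
    print(img_url, file_name)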