# -*- coding: utf-8 -*-
"""StableDiffusionUI_Tom_Riddle_Edition.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/WASasquatch/StableDiffusionUI-TomRiddle/blob/main/StableDiffusionUI_Tom_Riddle_Edition.ipynb
# StableDiffusionUI - Tom Riddle Edition ![visitors](https://visitor-badge.glitch.me/badge?page_id=TomRiddleEdition&left_color=blue&right_color=orange) [![GitHub](https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white)](https://github.com/WASasquatch/StableDiffusionUI-TomRiddle)
Adapted from: https://colab.research.google.com/drive/1AfAmwLMd_Vx33O9IwY2TmO9wKZ8ABRRa
Cleaned up for front-end use by **WAS**
## Stability AI Model Terms of Use
**By using this notebook, you agree to the following Terms of Use and license.**
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:
1. You can't use the model to deliberately produce or share illegal or harmful outputs or content.
2. CompVis claims no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license.
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M license with all your users (please read the license entirely and carefully).
Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license
"""
#@title Check GPU Allocation
!nvidia-smi
import os, subprocess, time
#@title <font size="6" color="orange">Setup Installations</font>
#@markdown Installation will restart the colab at the end. This is OK. Just run '<font color="green">**Launch WebUI**</font>' when it's restarted.
CLEAR_SETUP_LOG = True #@param{type:'boolean'}
ALLOW_COLAB_RESTART = True #@param{type: 'boolean'}
WD = '/content/stable-diffusion-webui'
def clear():
    # Clear the cell output (used to tidy up the setup log).
    from IPython.display import clear_output
    return clear_output()
def fetch_bytes(url_or_path):
    # Open a remote URL or a local file path and return a binary stream.
    if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
        from urllib.request import urlopen
        return urlopen(url_or_path)
    return open(url_or_path, 'rb')  # binary mode, so callers can .decode() either way
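# Usage sketch (illustrative only, not part of the setup flow): fetch_bytes returns a
# binary stream for both remote URLs and local paths, so callers decode the bytes
# themselves, e.g.
#   with fetch_bytes('https://example.com/key.txt') as f:
#       text = f.read().decode('utf-8')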
def packages():
    # Return the names of all pip packages installed in the current environment.
    import sys, subprocess
    return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()]
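# Usage sketch (illustrative; packages() is not called elsewhere in this notebook): it can
# be used to skip a pip install when a dependency is already present, e.g.
#   if 'gfpgan' not in packages():
#       subprocess.run(['pip', '-q', 'install', 'gfpgan'])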
res = ''
print('⏳ Installing Stable Diffusion WebUI ...')
os.chdir('/content/')
if not os.path.exists(WD):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/AUTOMATIC1111/stable-diffusion-webui'], stdout=subprocess.PIPE).stdout.decode('utf-8')
os.chdir(WD)
res += subprocess.run(['pip', '-q', 'install', '-r', 'requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
print('✅ Stable Diffusion WebUI installed.')
#os.chdir(WD)
if not os.path.exists('repositories'):
    os.makedirs('repositories')
res = ''
print('⏳ Cloning repositories...')
if not os.path.exists(f'{WD}/repositories/stable-diffusion'):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/stable-diffusion.git', f'{WD}/repositories/stable-diffusion'], stdout=subprocess.PIPE).stdout.decode('utf-8')
if not os.path.exists(f'{WD}/repositories/taming-transformers'):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/taming-transformers.git', f'{WD}/repositories/taming-transformers'], stdout=subprocess.PIPE).stdout.decode('utf-8')
if not os.path.exists(f'{WD}/repositories/CodeFormer'):
    os.chdir(f'{WD}/repositories')
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/sczhou/CodeFormer.git'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    res += subprocess.run(['pip', '-q', 'install', '-r', f'{WD}/repositories/CodeFormer/requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
print('✅ Repositories cloned.')
os.chdir(WD)
# get a token from https://huggingface.co/settings/tokens
try:
    with fetch_bytes('https://raw.githubusercontent.com/WASasquatch/easydiffusion/main/key.txt') as f:
        k = f.read().decode('utf-8').split(':')
        hu = k[0].strip()
        ht = k[1].strip()
except OSError as e:
    raise e
#user_token = "" #@param {type:"string"}
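# Alternative sketch (assumption, not wired into the download below): if the key.txt fetch
# is unavailable, uncomment user_token above and pass your own Hugging Face token to wget
# via an Authorization header instead of embedding credentials in the URL, e.g.
#   subprocess.run(['wget', '-q', '--show-progress',
#                   '--header', f'Authorization: Bearer {user_token}',
#                   'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt',
#                   '-O', 'model.ckpt'])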
print('⏳ Downloading model ...')
if not os.path.exists('model.ckpt'):
    print(subprocess.run(['wget', '-q', '--show-progress', f'https://{hu}:{ht}@huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt', '-O', 'model.ckpt'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
    if os.path.exists('model.ckpt'):
        print('✅ Model downloaded.')
    else:
        print('⚠️ Unable to download the model!')
else:
    print('✅ Model already downloaded.')
if not os.path.exists('GFPGANv1.3.pth'):
    print(subprocess.run(['wget', '-q', '--show-progress', 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', '-O', 'GFPGANv1.3.pth'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
os.chdir(WD)
subprocess.run(['git', 'pull', '--quiet'], stdout=subprocess.DEVNULL)
if CLEAR_SETUP_LOG:
    clear()
print('♻️ Setup finished! Restarting Colab Environment in 5 seconds ...')
time.sleep(5)
if ALLOW_COLAB_RESTART:
    os.kill(os.getpid(), 9)  # Kill the Colab kernel so the runtime restarts; everything on disk stays intact.
"""Launch WebUI. You will get a link to nnn.gradio.app, follow it."""
#@title <font size="6" color="green">Launch WebUI</font>
import sys, os
WD = '/content/stable-diffusion-webui'
# argparse reads arguments from sys.argv[1:], so keep a placeholder program name in
# argv[0]; otherwise the --opt-split-attention flag would be silently dropped.
sys.argv = ['webui.py', '--opt-split-attention']
os.chdir(WD)
import webui
import modules.ui
import modules.txt2img
import modules.img2img
import modules.extras
demo = modules.ui.create_ui(
    txt2img=webui.wrap_gradio_gpu_call(modules.txt2img.txt2img),
    img2img=webui.wrap_gradio_gpu_call(modules.img2img.img2img),
    run_extras=webui.wrap_gradio_gpu_call(modules.extras.run_extras),
    run_pnginfo=modules.extras.run_pnginfo
)
demo.launch(share=True)
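# share=True asks Gradio to open a temporary public *.gradio.app tunnel so the UI is
# reachable from outside the Colab VM; set share=False to keep it local to the runtime.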