Spaces:
No application file
No application file
# -*- coding: utf-8 -*- | |
"""StableDiffusionUI_Tom_Riddle_Edition.ipynb | |
Automatically generated by Colaboratory. | |
Original file is located at | |
https://colab.research.google.com/github/WASasquatch/StableDiffusionUI-TomRiddle/blob/main/StableDiffusionUI_Tom_Riddle_Edition.ipynb | |
# StableDiffusionUI - Tom Riddle Edition ![visitors](https://visitor-badge.glitch.me/badge?page_id=TomRiddleEdition&left_color=blue&right_color=orange) [![GitHub](https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white)](https://github.com/WASasquatch/StableDiffusionUI-TomRiddle) | |
Adapted from: https://colab.research.google.com/drive/1AfAmwLMd_Vx33O9IwY2TmO9wKZ8ABRRa | |
Cleaned up for front-end use by **WAS**
## Stability.AI Model Terms of Use
**By using this Notebook, you agree to the following Terms of Use, and license** | |
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. | |
The CreativeML OpenRAIL License specifies: | |
1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content | |
2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license | |
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) | |
Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license | |
""" | |
#@title Check GPU Allocation
# IPython shell magic (notebook-only syntax): prints which GPU, if any,
# Colab allocated to this runtime.
!nvidia-smi
import os, subprocess, time
#@title <font size="6" color="orange">Setup Installations</font>
#@markdown Installation will restart the colab at the end. This is OK. Just run '<font color="green">**Launch WebUI**</font>' when it's restarted.
# Colab form checkbox: wipe the (very long) install log once setup finishes.
CLEAR_SETUP_LOG = True #@param{type:'boolean'}
# Colab form checkbox: let the script kill its own process at the end of
# setup so Colab restarts the runtime with the freshly installed packages.
ALLOW_COLAB_RESTART = True #@param{type: 'boolean'}
# Working directory: where the AUTOMATIC1111 WebUI repository is cloned.
WD = '/content/stable-diffusion-webui'
def clear():
    """Erase the current cell's output (Colab/IPython convenience wrapper)."""
    from IPython.display import clear_output
    return clear_output()
def fetch_bytes(url_or_path):
    """Open *url_or_path* for reading and return a binary file-like object.

    Accepts either an http(s) URL (fetched with ``urllib.request.urlopen``)
    or a local filesystem path.  Both branches return an object whose
    ``read()`` yields *bytes*, so callers can uniformly ``.decode('utf-8')``
    the contents.  (Bug fix: the local-path branch previously opened the
    file in text mode ``'r'``, which returns ``str`` and would break the
    caller's ``.decode()`` call.)
    """
    if str(url_or_path).startswith(('http://', 'https://')):
        from urllib.request import urlopen
        return urlopen(url_or_path)
    # 'rb' keeps the return type consistent with the urlopen branch.
    return open(url_or_path, 'rb')
def packages():
    """Return the distribution names of all pip packages in this runtime.

    Shells out to ``pip freeze`` and strips the ``==version`` suffix from
    each line, yielding a plain list of package-name strings.
    """
    import sys
    import subprocess
    frozen = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
    names = []
    for entry in frozen.split():
        names.append(entry.decode().split('==')[0])
    return names
# ---- Install the AUTOMATIC1111 WebUI ---------------------------------------
res = ''
print('β³ Installing Stable Diffusion WebUI ...')
os.chdir('/content/')
# Clone only on the first run; the folder persists for the VM's lifetime.
if not os.path.exists(WD):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/AUTOMATIC1111/stable-diffusion-webui'], stdout=subprocess.PIPE).stdout.decode('utf-8')
os.chdir(WD)
res += subprocess.run(['pip', '-q', 'install', '-r', 'requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
print('β Stable Diffusion WebUI installed.')
#os.chdir(WD)
# ---- Clone the helper repositories the WebUI expects under repositories/ ---
if not os.path.exists('repositories'):
    os.makedirs('repositories')
res = ''
print('β³ Cloning repositories...')
# Each clone is skipped if a previous run already created the target dir.
if not os.path.exists(f'{WD}/repositories/stable-diffusion'):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/stable-diffusion.git', f'{WD}/repositories/stable-diffusion'], stdout=subprocess.PIPE).stdout.decode('utf-8')
if not os.path.exists(f'{WD}/repositories/taming-transformers'):
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/CompVis/taming-transformers.git', f'{WD}/repositories/taming-transformers'], stdout=subprocess.PIPE).stdout.decode('utf-8')
if not os.path.exists(f'{WD}/repositories/CodeFormer'):
    # CodeFormer is cloned from inside repositories/ and brings its own
    # requirements file, installed immediately after.
    os.chdir(f'{WD}/repositories')
    res += subprocess.run(['git', 'clone', '--quiet', 'https://github.com/sczhou/CodeFormer.git'], stdout=subprocess.PIPE).stdout.decode('utf-8')
    res += subprocess.run(['pip', '-q', 'install', '-r', f'{WD}/repositories/CodeFormer/requirements.txt'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
print('β Repositories cloned.')
os.chdir(WD)
# get a token from https://huggingface.co/settings/tokens
# Fetch a shared HuggingFace "username:token" pair from the maintainer's
# repo so the authenticated model download below can proceed.
# NOTE(review): pulling credentials from a remote text file is fragile and a
# security smell; the commented-out user_token form below would let users
# supply their own token instead.
try:
    with fetch_bytes('https://raw.githubusercontent.com/WASasquatch/easydiffusion/main/key.txt') as f:
        k = f.read().decode('utf-8').split(':'); hu = k[0].strip(); ht = k[1].strip()
except OSError as e:
    # NOTE(review): `raise e` here is a no-op re-raise — a placeholder for
    # friendlier error handling that was never filled in.
    raise e
#user_token = "" #@param {type:"string"}
print('β³ Downloading model ...')
# Download the SD v1.4 checkpoint (~4 GB) with wget, embedding the fetched
# credentials in the URL; skipped when a previous run already produced it.
if not os.path.exists('model.ckpt'):
    print(subprocess.run(['wget', '-q', '--show-progress', f'https://{hu}:{ht}@huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt', '-O', 'model.ckpt'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
    # wget -O creates the file even on failure only in some modes, so the
    # success check is simply the file's existence after the attempt.
    if os.path.exists('model.ckpt'):
        print('β Model downloaded.')
    else:
        print('β οΈ Unable to download the model!')
else:
    print('β Model downloaded.')
# GFPGAN face-restoration weights (public release, no auth needed).
if not os.path.exists('GFPGANv1.3.pth'):
    print(subprocess.run(['wget', '-q', '--show-progress', 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth', '-O', 'GFPGANv1.3.pth'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
os.chdir(WD)
# Bring the WebUI checkout up to date with upstream.
subprocess.run(['git', 'pull', '--quiet'], stdout=subprocess.DEVNULL)
if CLEAR_SETUP_LOG:
    clear()
print('β»οΈ Setup finished! Restarting Colab Environment in 5 seconds ...')
time.sleep(5)
if ALLOW_COLAB_RESTART:
    # SIGKILL our own process: Colab detects the crash and restarts the
    # runtime, which picks up the newly installed packages.
    os.kill(os.getpid(), 9) # This will crash Colab (required, everything will still be intact so dont worry)
"""Launch WebUI. You will get a link to nnn.gradio.app, follow it.""" | |
#@title <font size="6" color="green">Launch WebUI</font> | |
import sys, os | |
WD = '/content/stable-diffusion-webui' | |
sys.argv = ["--opt-split-attention"] | |
os.chdir(WD) | |
import webui | |
import modules.ui | |
import modules.txt2img | |
import modules.img2img | |
demo = modules.ui.create_ui( | |
txt2img=webui.wrap_gradio_gpu_call(modules.txt2img.txt2img), | |
img2img=webui.wrap_gradio_gpu_call(modules.img2img.img2img), | |
run_extras=webui.wrap_gradio_gpu_call(modules.extras.run_extras), | |
run_pnginfo=modules.extras.run_pnginfo | |
) | |
demo.launch(share=True) |