ziffir committed
Commit d1f1153
1 Parent(s): f0145d3

Upload 4 files

Files changed (4)
  1. Dockerfile +94 -0
  2. README.md +10 -0
  3. app.py +218 -0
  4. gitattributes +35 -0
Dockerfile ADDED
@@ -0,0 +1,94 @@
+ FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
+
+ ENV DEBIAN_FRONTEND=noninteractive
+
+ RUN apt-get update && apt-get install -y git libgl1-mesa-glx libglib2.0-0
+
+ RUN apt-get update && apt-get install -y unzip build-essential aria2 cmake llvm
+
+ RUN useradd -m -u 1000 user
+
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH \
+     PYTHONPATH=$HOME/app \
+     PYTHONUNBUFFERED=1 \
+     GRADIO_ALLOW_FLAGGING=never \
+     GRADIO_NUM_PORTS=1 \
+     GRADIO_SERVER_NAME=0.0.0.0 \
+     GRADIO_THEME=huggingface \
+     GRADIO_SHARE=False \
+     SYSTEM=spaces
+
+ # Set the working directory to the user's home directory
+ WORKDIR $HOME/app
+
+ # Clone your repository or add your code to the container
+ RUN git clone -b main https://github.com/fffiloni/PASD $HOME/app
+ RUN pip install torchaudio
+ RUN pip install torch --upgrade
+ RUN pip install torchaudio --upgrade
+ RUN pip install torchvision
+ RUN pip install -q spaces gradio gradio_imageslider diffusers==0.21.4 accelerate basicsr ultralytics salesforce-lavis webdataset pytorch_lightning
+ RUN pip install -q https://download.pytorch.org/whl/cu121/xformers-0.0.22.post7-cp310-cp310-manylinux2014_x86_64.whl
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/akhaliq/RetinaFace-R50/resolve/main/RetinaFace-R50.pth -d $HOME/app/annotator/ckpts -o RetinaFace-R50.pth
+
+ # Define base model URL
+ ENV BaseModelUrl=https://huggingface.co/runwayml/stable-diffusion-v1-5
+ ENV BaseModelDir=$HOME/app/checkpoints/stable-diffusion-v1-5
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/model_index.json -d ${BaseModelDir} -o model_index.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.bin -d ${BaseModelDir}/vae -o diffusion_pytorch_model.bin
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.safetensors -d ${BaseModelDir}/vae -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/vae/config.json -d ${BaseModelDir}/vae -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.bin -d ${BaseModelDir}/unet -o diffusion_pytorch_model.bin
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.safetensors -d ${BaseModelDir}/unet -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/unet/config.json -d ${BaseModelDir}/unet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/tokenizer/vocab.json -d ${BaseModelDir}/tokenizer -o vocab.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/tokenizer/tokenizer_config.json -d ${BaseModelDir}/tokenizer -o tokenizer_config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/tokenizer/special_tokens_map.json -d ${BaseModelDir}/tokenizer -o special_tokens_map.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/tokenizer/merges.txt -d ${BaseModelDir}/tokenizer -o merges.txt
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/text_encoder/pytorch_model.bin -d ${BaseModelDir}/text_encoder -o pytorch_model.bin
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/text_encoder/model.safetensors -d ${BaseModelDir}/text_encoder -o model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/text_encoder/config.json -d ${BaseModelDir}/text_encoder -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/scheduler/scheduler_config.json -d ${BaseModelDir}/scheduler -o scheduler_config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/safety_checker/pytorch_model.bin -d ${BaseModelDir}/safety_checker -o pytorch_model.bin
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/resolve/main/safety_checker/model.safetensors -d ${BaseModelDir}/safety_checker -o model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/safety_checker/config.json -d ${BaseModelDir}/safety_checker -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M ${BaseModelUrl}/raw/main/feature_extractor/preprocessor_config.json -d ${BaseModelDir}/feature_extractor -o preprocessor_config.json
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/majicmixRealistic_v6.safetensors -d $HOME/app/checkpoints/personalized_models -o majicmixRealistic_v6.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd/checkpoint-100000/scaler.pt -d $HOME/app/runs/pasd/checkpoint-100000 -o scaler.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd/checkpoint-100000/unet/config.json -d $HOME/app/runs/pasd/checkpoint-100000/unet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd/checkpoint-100000/unet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd/checkpoint-100000/unet -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd/checkpoint-100000/controlnet/config.json -d $HOME/app/runs/pasd/checkpoint-100000/controlnet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd/checkpoint-100000/controlnet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd/checkpoint-100000/controlnet -o diffusion_pytorch_model.safetensors
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light/checkpoint-120000/scaler.pt -d $HOME/app/runs/pasd_light/checkpoint-120000 -o scaler.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_light/checkpoint-120000/unet/config.json -d $HOME/app/runs/pasd_light/checkpoint-120000/unet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light/checkpoint-120000/unet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_light/checkpoint-120000/unet -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_light/checkpoint-120000/controlnet/config.json -d $HOME/app/runs/pasd_light/checkpoint-120000/controlnet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light/checkpoint-120000/controlnet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_light/checkpoint-120000/controlnet -o diffusion_pytorch_model.safetensors
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light_rrdb/checkpoint-100000/scaler.pt -d $HOME/app/runs/pasd_light_rrdb/checkpoint-100000 -o scaler.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_light_rrdb/checkpoint-100000/unet/config.json -d $HOME/app/runs/pasd_light_rrdb/checkpoint-100000/unet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light_rrdb/checkpoint-100000/unet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_light_rrdb/checkpoint-100000/unet -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_light_rrdb/checkpoint-100000/controlnet/config.json -d $HOME/app/runs/pasd_light_rrdb/checkpoint-100000/controlnet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_light_rrdb/checkpoint-100000/controlnet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_light_rrdb/checkpoint-100000/controlnet -o diffusion_pytorch_model.safetensors
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_rrdb/checkpoint-100000/scaler.pt -d $HOME/app/runs/pasd_rrdb/checkpoint-100000 -o scaler.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_rrdb/checkpoint-100000/unet/config.json -d $HOME/app/runs/pasd_rrdb/checkpoint-100000/unet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_rrdb/checkpoint-100000/unet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_rrdb/checkpoint-100000/unet -o diffusion_pytorch_model.safetensors
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/raw/main/pasd_rrdb/checkpoint-100000/controlnet/config.json -d $HOME/app/runs/pasd_rrdb/checkpoint-100000/controlnet -o config.json
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/PASD/resolve/main/pasd_rrdb/checkpoint-100000/controlnet/diffusion_pytorch_model.safetensors -d $HOME/app/runs/pasd_rrdb/checkpoint-100000/controlnet -o diffusion_pytorch_model.safetensors
+
+ # Set the environment variable to specify the GPU device
+ ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
+ ENV CUDA_VISIBLE_DEVICES=0
+
+ COPY app.py .
+
+ # Run your app.py script
+ CMD ["python", "app.py"]
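
Because the image is assembled from several dozen individual aria2c downloads, a quick sanity check of the resulting checkpoint tree can catch a failed or partial download before the Space starts. The following is a minimal, hypothetical sketch (not part of this commit) that verifies the directories app.py expects — checkpoints/stable-diffusion-v1-5, checkpoints/personalized_models and runs/pasd/checkpoint-100000 — are populated; the file list is taken directly from the download commands above.

# check_downloads.py -- hypothetical sanity check, not part of this commit.
# Paths mirror the aria2c targets in the Dockerfile above, relative to $HOME/app.
from pathlib import Path

EXPECTED = [
    "annotator/ckpts/RetinaFace-R50.pth",
    "checkpoints/stable-diffusion-v1-5/model_index.json",
    "checkpoints/stable-diffusion-v1-5/vae/diffusion_pytorch_model.safetensors",
    "checkpoints/stable-diffusion-v1-5/unet/diffusion_pytorch_model.safetensors",
    "checkpoints/stable-diffusion-v1-5/text_encoder/model.safetensors",
    "checkpoints/stable-diffusion-v1-5/tokenizer/vocab.json",
    "checkpoints/stable-diffusion-v1-5/scheduler/scheduler_config.json",
    "checkpoints/stable-diffusion-v1-5/feature_extractor/preprocessor_config.json",
    "checkpoints/personalized_models/majicmixRealistic_v6.safetensors",
    "runs/pasd/checkpoint-100000/unet/diffusion_pytorch_model.safetensors",
    "runs/pasd/checkpoint-100000/controlnet/diffusion_pytorch_model.safetensors",
]

def main(root: str = ".") -> None:
    # Report any expected checkpoint file that is missing from the tree.
    missing = [p for p in EXPECTED if not (Path(root) / p).is_file()]
    for p in missing:
        print(f"missing: {p}")
    print("all downloads present" if not missing else f"{len(missing)} file(s) missing")

if __name__ == "__main__":
    main()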
README.md ADDED
@@ -0,0 +1,10 @@
+ ---
+ title: PASD Magnify
+ emoji: ✨
+ colorFrom: indigo
+ colorTo: pink
+ sdk: docker
+ pinned: false
+ ---
+
+ arxiv.org/abs/2308.14469
app.py ADDED
@@ -0,0 +1,218 @@
+ import spaces
+ import os
+ import datetime
+ import einops
+ import gradio as gr
+ from gradio_imageslider import ImageSlider
+ import numpy as np
+ import torch
+ import random
+ from PIL import Image
+ from pathlib import Path
+ from torchvision import transforms
+ import torch.nn.functional as F
+ from torchvision.models import resnet50, ResNet50_Weights
+
+ from pytorch_lightning import seed_everything
+ from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor
+ from diffusers import AutoencoderKL, DDIMScheduler, PNDMScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler
+
+ from pipelines.pipeline_pasd import StableDiffusionControlNetPipeline
+ from myutils.misc import load_dreambooth_lora, rand_name
+ from myutils.wavelet_color_fix import wavelet_color_fix
+ from annotator.retinaface import RetinaFaceDetection
+
+ use_pasd_light = False
+ face_detector = RetinaFaceDetection()
+
+ if use_pasd_light:
+     from models.pasd_light.unet_2d_condition import UNet2DConditionModel
+     from models.pasd_light.controlnet import ControlNetModel
+ else:
+     from models.pasd.unet_2d_condition import UNet2DConditionModel
+     from models.pasd.controlnet import ControlNetModel
+
+ pretrained_model_path = "checkpoints/stable-diffusion-v1-5"
+ ckpt_path = "runs/pasd/checkpoint-100000"
+ #dreambooth_lora_path = "checkpoints/personalized_models/toonyou_beta3.safetensors"
+ dreambooth_lora_path = "checkpoints/personalized_models/majicmixRealistic_v6.safetensors"
+ #dreambooth_lora_path = "checkpoints/personalized_models/Realistic_Vision_V5.1.safetensors"
+ weight_dtype = torch.float16
+ device = "cuda"
+
+ scheduler = UniPCMultistepScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
+ text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
+ tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
+ vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
+ feature_extractor = CLIPImageProcessor.from_pretrained(f"{pretrained_model_path}/feature_extractor")
+ unet = UNet2DConditionModel.from_pretrained(ckpt_path, subfolder="unet")
+ controlnet = ControlNetModel.from_pretrained(ckpt_path, subfolder="controlnet")
+ vae.requires_grad_(False)
+ text_encoder.requires_grad_(False)
+ unet.requires_grad_(False)
+ controlnet.requires_grad_(False)
+
+ unet, vae, text_encoder = load_dreambooth_lora(unet, vae, text_encoder, dreambooth_lora_path)
+
+ text_encoder.to(device, dtype=weight_dtype)
+ vae.to(device, dtype=weight_dtype)
+ unet.to(device, dtype=weight_dtype)
+ controlnet.to(device, dtype=weight_dtype)
+
+ validation_pipeline = StableDiffusionControlNetPipeline(
+     vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor,
+     unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False,
+ )
+ #validation_pipeline.enable_vae_tiling()
+ validation_pipeline._init_tiled_vae(decoder_tile_size=224)
+
+ weights = ResNet50_Weights.DEFAULT
+ preprocess = weights.transforms()
+ resnet = resnet50(weights=weights)
+ resnet.eval()
+
+ def resize_image(image_path, target_height):
+     # Open the image file
+     with Image.open(image_path) as img:
+         # Calculate the ratio to resize the image to the target height
+         ratio = target_height / float(img.size[1])
+         # Calculate the new width based on the aspect ratio
+         new_width = int(float(img.size[0]) * ratio)
+         # Resize the image
+         resized_img = img.resize((new_width, target_height), Image.LANCZOS)
+         # Save the resized image
+         #resized_img.save(output_path)
+         return resized_img
+
+ @spaces.GPU(enable_queue=True)
+ def inference(input_image, prompt, a_prompt, n_prompt, denoise_steps, upscale, alpha, cfg, seed):
+     input_image = resize_image(input_image, 512)
+     process_size = 768
+     resize_preproc = transforms.Compose([
+         transforms.Resize(process_size, interpolation=transforms.InterpolationMode.BILINEAR),
+     ])
+
+     # Get the current timestamp
+     timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+
+     with torch.no_grad():
+         seed_everything(seed)
+         generator = torch.Generator(device=device)
+
+         input_image = input_image.convert('RGB')
+         batch = preprocess(input_image).unsqueeze(0)
+         prediction = resnet(batch).squeeze(0).softmax(0)
+         class_id = prediction.argmax().item()
+         score = prediction[class_id].item()
+         category_name = weights.meta["categories"][class_id]
+         if score >= 0.1:
+             prompt += f"{category_name}" if prompt=='' else f", {category_name}"
+
+         prompt = a_prompt if prompt=='' else f"{prompt}, {a_prompt}"
+
+         ori_width, ori_height = input_image.size
+         resize_flag = False
+
+         rscale = upscale
+         input_image = input_image.resize((input_image.size[0]*rscale, input_image.size[1]*rscale))
+
+         #if min(validation_image.size) < process_size:
+         #    validation_image = resize_preproc(validation_image)
+
+         input_image = input_image.resize((input_image.size[0]//8*8, input_image.size[1]//8*8))
+         width, height = input_image.size
+         resize_flag = True
+
+         try:
+             image = validation_pipeline(
+                 None, prompt, input_image, num_inference_steps=denoise_steps, generator=generator, height=height, width=width, guidance_scale=cfg,
+                 negative_prompt=n_prompt, conditioning_scale=alpha, eta=0.0,
+             ).images[0]
+
+             if True: #alpha<1.0:
+                 image = wavelet_color_fix(image, input_image)
+
+             if resize_flag:
+                 image = image.resize((ori_width*rscale, ori_height*rscale))
+         except Exception as e:
+             print(e)
+             image = Image.new(mode="RGB", size=(512, 512))
+
+     # Convert and save the result image as JPEG
+     image.save(f'result_{timestamp}.jpg', 'JPEG')
+
+     # Convert and save the input image as JPEG
+     input_image.save(f'input_{timestamp}.jpg', 'JPEG')
+
+     return (f"input_{timestamp}.jpg", f"result_{timestamp}.jpg"), f"result_{timestamp}.jpg"
+
+ title = "Pixel-Aware Stable Diffusion for Real-ISR"
+ description = "Gradio Demo for PASD Real-ISR. To use it, simply upload your image, or click one of the examples to load them."
+ article = "<a href='https://github.com/yangxy/PASD' target='_blank'>Github Repo Pytorch</a>"
+ #examples=[['samples/27d38eeb2dbbe7c9.png'],['samples/629e4da70703193b.png']]
+
+ css = """
+ #col-container{
+     margin: 0 auto;
+     max-width: 720px;
+ }
+ #project-links{
+     margin: 0 0 12px !important;
+     column-gap: 8px;
+     display: flex;
+     justify-content: center;
+     flex-wrap: nowrap;
+     flex-direction: row;
+     align-items: center;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.HTML(f"""
+         <h2 style="text-align: center;">
+             PASD Magnify
+         </h2>
+         <p style="text-align: center;">
+             Pixel-Aware Stable Diffusion for Realistic Image Super-resolution and Personalized Stylization
+         </p>
+         <p id="project-links" align="center">
+             <a href='https://github.com/yangxy/PASD'><img src='https://img.shields.io/badge/Project-Page-Green'></a> <a href='https://huggingface.co/papers/2308.14469'><img src='https://img.shields.io/badge/Paper-Arxiv-red'></a>
+         </p>
+         <p style="margin:12px auto;display: flex;justify-content: center;">
+             <a href="https://huggingface.co/spaces/fffiloni/PASD?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg.svg" alt="Duplicate this Space"></a>
+         </p>
+
+         """)
+         with gr.Row():
+             with gr.Column():
+                 input_image = gr.Image(type="filepath", sources=["upload"], value="samples/frog.png")
+                 prompt_in = gr.Textbox(label="Prompt", value="Frog")
+                 with gr.Accordion(label="Advanced settings", open=False):
+                     added_prompt = gr.Textbox(label="Added Prompt", value='clean, high-resolution, 8k, best quality, masterpiece')
+                     neg_prompt = gr.Textbox(label="Negative Prompt", value='dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+                     denoise_steps = gr.Slider(label="Denoise Steps", minimum=10, maximum=50, value=20, step=1)
+                     upsample_scale = gr.Slider(label="Upsample Scale", minimum=1, maximum=4, value=2, step=1)
+                     condition_scale = gr.Slider(label="Conditioning Scale", minimum=0.5, maximum=1.5, value=1.1, step=0.1)
+                     classifier_free_guidance = gr.Slider(label="Classifier-free Guidance", minimum=0.1, maximum=10.0, value=7.5, step=0.1)
+                     seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+                 submit_btn = gr.Button("Submit")
+             with gr.Column():
+                 b_a_slider = ImageSlider(label="B/A result", position=0.5)
+                 file_output = gr.File(label="Downloadable image result")
+
+     submit_btn.click(
+         fn = inference,
+         inputs = [
+             input_image, prompt_in,
+             added_prompt, neg_prompt,
+             denoise_steps,
+             upsample_scale, condition_scale,
+             classifier_free_guidance, seed
+         ],
+         outputs = [
+             b_a_slider,
+             file_output
+         ]
+     )
+ demo.queue().launch()
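
For reference, the Gradio handler can also be exercised without the UI. The snippet below is a minimal, hypothetical sketch (not part of this commit), assuming the checkpoints downloaded by the Dockerfile and a CUDA device are available; the sample image path and the slider defaults (20 denoise steps, 2x upscale, conditioning scale 1.1, CFG 7.5) are taken from the interface definition above.

# Hypothetical direct call to the handler defined in app.py.
# Note: @spaces.GPU is a no-op outside a ZeroGPU Space, so this only
# illustrates the expected argument order and return shape.
from app import inference

(before_path, after_path), result_path = inference(
    input_image="samples/frog.png",   # sample bundled with the PASD repo
    prompt="Frog",
    a_prompt="clean, high-resolution, 8k, best quality, masterpiece",
    n_prompt="dotted, noise, blur, lowres, oversmooth",
    denoise_steps=20,                 # UI default
    upscale=2,                        # UI default
    alpha=1.1,                        # conditioning scale, UI default
    cfg=7.5,                          # classifier-free guidance, UI default
    seed=42,
)
print("saved:", result_path)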
gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text