import torch
from PIL import Image
import gradio as gr
import spaces
from KandiSuperRes import get_SR_pipeline

# Pick the GPU when available and load the 2x (flash) and 4x super-resolution pipelines once at startup.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
sr_pipe2x = get_SR_pipeline(device=device, fp16=True, flash=True, scale=2)
sr_pipe4x = get_SR_pipeline(device=device, fp16=True, flash=False, scale=4)

@spaces.GPU()
def inference(image, size):
    """Upscale the input image with the selected KandiSuperRes pipeline."""
    if image is None:
        raise gr.Error("Image not uploaded")
    # r_image = Image.open(image)
    if size == '2x':
        result = sr_pipe2x(image)
    else:
        result = sr_pipe4x(image)
    print(f"Image size ({device}): {size} ... OK")
    return result

title = "KandiSuperRes - diffusion model for super resolution"
description = "KandiSuperRes Flash is a new version of the diffusion model for super resolution. It combines a distilled version of the KandiSuperRes model with the distilled Kandinsky 3.0 Flash model. KandiSuperRes Flash not only improves image clarity, but also corrects artifacts, restores details, and improves image aesthetics. One of its most important advantages is that it can be used in «infinite super resolution» mode."
article = "<div style='text-align: center;'>Twitter <a href='https://twitter.com/DoEvent' target='_blank'>Max Skobeev</a> | <a href='https://huggingface.co/ai-forever/KandiSuperRes' target='_blank'>Model card</a></div>"

gr.Interface(inference,
             [gr.Image(type="pil"),
              gr.Radio(['2x', '4x'],
                       type="value",
                       value='2x',
                       label='Resolution model')],
             gr.Image(type="filepath", label="Output"),
             title=title,
             description=description,
             article=article,
             examples=[['groot.jpeg', "2x"]],
             allow_flagging='never',
             cache_examples=False,
             delete_cache=(1800, 3600),
             ).queue(api_open=True).launch(show_error=True, show_api=True)
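
# A minimal sketch of calling this Space over its Gradio API (show_api=True above),
# e.g. from a separate client process. The Space id "ai-forever/KandiSuperRes" and the
# "/predict" endpoint name are assumptions inferred from this Interface, not verified.
#
#     from gradio_client import Client, handle_file
#
#     client = Client("ai-forever/KandiSuperRes")  # hypothetical Space id
#     result_path = client.predict(
#         handle_file("groot.jpeg"),  # input image
#         "2x",                       # resolution model
#         api_name="/predict",
#     )
#     print(result_path)  # local path to the upscaled image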