 
import gradio as gr
import torch
from PIL import Image
from diffusers import LCMScheduler, AutoencoderTiny, AutoPipelineForImage2Image
import math
import time
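# Segmind-Vega is a distilled SDXL-class base model; Segmind-VegaRT is its LCM-LoRA
# adapter for few-step (near real-time) inference.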
model_id = "segmind/Segmind-Vega"
adapter_id = "segmind/Segmind-VegaRT"
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the Segmind-Vega img2img pipeline (fp16 on GPU, fp32 on CPU).
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = AutoPipelineForImage2Image.from_pretrained(model_id, torch_dtype=dtype)
# Tiny SDXL autoencoder ("taesdxl" rather than the SD-1.x "taesd") for faster decoding;
# keep its dtype in sync with the pipeline.
pipe.vae = AutoencoderTiny.from_pretrained(
    "madebyollin/taesdxl",
    torch_dtype=dtype,
    use_safetensors=True,
)
# The VegaRT adapter is an LCM-LoRA, so it requires the LCM scheduler.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

pipe = pipe.to(device)
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

def resize(w, h, img):
    # Resize the PIL image to the requested width and height.
    return img.resize((w, h))

def infer(source_img, prompt, steps, seed, Strength):
    start = time.time()
    print("Starting inference")
    img = Image.open(source_img)
    generator = torch.Generator(device).manual_seed(seed)
    # img2img effectively runs about steps * strength denoising steps; ensure at least one.
    if int(steps * Strength) < 1:
        steps = math.ceil(1 / max(0.10, Strength))
    # Scale to 512 px wide, keeping the aspect ratio and a height divisible by 8.
    w, h = img.size
    newW = 512
    newH = int(h * newW / w) // 8 * 8
    source_image = resize(newW, newH, img)
    source_image.save('source.png')
    # Output size follows the resized input image; guidance is disabled for LCM sampling.
    image = pipe(prompt, image=source_image, strength=Strength, guidance_scale=0.0,
                 num_inference_steps=steps, generator=generator).images[0]
    end = time.time()
    print("Steps:", steps)
    print("Time:", end - start)
    return image

gr.Interface(fn=infer, inputs=[
    gr.Image(sources=["upload", "webcam", "clipboard"], type="filepath", label="Raw Image"),
    gr.Textbox(label='Prompt Input Text. 77 Token (Keyword or Symbol) Maximum'),
    gr.Slider(1, 5, value=2, step=1, label='Number of Iterations'),
    gr.Slider(label="Seed", minimum=0, maximum=987654321987654321, step=1, randomize=True),
    gr.Slider(label='Strength', minimum=0.1, maximum=1, step=.05, value=.5)],
    outputs='image',
    title="Segmind-Vega + VegaRT Image to Image Pipeline",
    description="For more information on Segmind-Vega and the VegaRT adapter see https://huggingface.co/segmind/Segmind-Vega and https://huggingface.co/segmind/Segmind-VegaRT <br><br>Upload an image, use your cam, or paste an image, then enter a prompt (or just let it do its thing) and click Submit. For more information about Stable Diffusion or suggestions for prompts, keywords, artists, or styles see https://github.com/Maks-s/sd-akashic",
    article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").queue(max_size=10).launch()
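# Note: queue(max_size=10) caps the number of pending requests; launch() serves the app on
# Gradio's default port (7860) when run locally, e.g. `python app.py` if this file is saved
# as app.py, or inside the Space container when deployed.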