import os

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from transformers import pipeline


# Text-to-image tab: load the hosted Stable Diffusion v1.5 model from the Hugging Face Hub.
sd_description = "Text-to-image generation"
sd_examples = [["kitten"], ["cat"], ["dog"]]
sd_demo = gr.Interface.load("models/runwayml/stable-diffusion-v1-5", title="Text-to-image generation", description=sd_description, examples=sd_examples)


# Image-classification tab: build an Interface directly from a transformers pipeline.
clf_pipe = pipeline("image-classification")
examples = [[os.path.join(os.path.dirname(__file__), "lion.jpg")], [os.path.join(os.path.dirname(__file__), "cat.jpeg")]]
app = gr.Interface.from_pipeline(clf_pipe, examples=examples, title="Image classification")


# Local text-to-image tab: run dreamlike-photoreal-2.0 on CPU in float32.
model_id = "dreamlike-art/dreamlike-photoreal-2.0"
sd_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe_v1 = sd_pipe.to("cpu")
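# Optional sketch (not in the original code): if a GPU is available, loading the same
# pipeline in half precision and moving it to CUDA is the usual way to speed up generation:
#   sd_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")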


def generate_image_v1(prompt):
    # Return the first image generated for the given text prompt.
    return pipe_v1(prompt).images[0]

v1_examples = [["sunset"], ["beach"]]
app_v1 = gr.Interface(fn=generate_image_v1, inputs="text", outputs="image", examples=v1_examples)


demo = gr.TabbedInterface([sd_demo, app, app_v1], ["Text-to-image", "Image classification", "Text-to-image v1"])


demo.launch()
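# Note (assumption, not part of the original app): when running outside Hugging Face Spaces,
# demo.launch(server_name="0.0.0.0", server_port=7860) exposes the UI on the local network,
# and demo.launch(share=True) creates a temporary public link.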