Qianli committed on
Commit ecc9faf
1 Parent(s): 3290883

Update app.py

Files changed (1)
  1. app.py +72 -11
app.py CHANGED
@@ -1,21 +1,82 @@
- from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
+ from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler, StableDiffusionImg2ImgPipeline
  import torch
  import gradio as gr
+ from io import BytesIO
+ from PIL import Image
+ import numpy as np
+ #from trans import LanguageTrans
+ import requests
+ import http.client
+ import random
+ import json
 
  model_id = "stabilityai/stable-diffusion-2"
- device = "cuda" if torch.cuda.is_available() else "cpu"
+
  # Use the Euler scheduler here instead
  scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
- pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, revision="fp16")
- pipe = pipe.to(device)
- #pipe.enable_attention_slicing()
- #prompt = "a young doctor talk with a patient in the cartoon style"
- #num_imgs=3
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
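Note: the single module-level pipeline (loaded with revision="fp16") is dropped here, and as the rest of the diff shows, a pipeline is rebuilt inside gen() for every request. The fp16 weights and torch.float16 only make sense when device is "cuda"; on a CPU-only host half precision is unsupported or far too slow for interactive use. A minimal sketch, not from this commit, that reuses the model_id and scheduler above and picks the dtype from the detected device:

    import torch
    from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

    model_id = "stabilityai/stable-diffusion-2"
    scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Assumption: fall back to float32 on CPU, where float16 inference
    # is unsupported or impractically slow.
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=dtype)
    pipe = pipe.to(device)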
+ def is_contains_chinese(strs):
+     for _char in strs:
+         if '\u4e00' <= _char <= '\u9fa5':
+             return True
+     return False
+ def trans_youdao(sentence):
+     """有道翻译"""
+     content = sentence
+     data = {
+         "i": content,
+         "from": "AUTO",
+         "to": "AUTO",
+         "smartresult": "dict",
+         "client": "fanyideskweb",
+         "doctype": "json",
+         "version": "2.1",
+         "keyfrom": "fanyi.web",
+         "action": "FY_BY_REALTIME",
+         "typoResult": "false"
+     }
+     response = requests.post("http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule",
+                              data=data).json()
+     resp = response["translateResult"][0][0]["tgt"]
+     #print('{}'.format(resp))
+     #print('{}'.format(resp))
+     return(resp)
+     #return resp
+
+
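The two helpers above detect Chinese input and push the prompt through the public Youdao web endpoint before it reaches the pipeline. That endpoint is unofficial, so it can throttle requests or change its response shape without notice; wrapping the call and falling back to the original prompt keeps the app usable when translation fails. A minimal sketch of such a guard, assuming the is_contains_chinese and trans_youdao definitions above (maybe_translate itself is hypothetical, not part of the commit):

    def maybe_translate(prompt):
        # Hypothetical wrapper: translate only Chinese prompts and keep the
        # original text if the Youdao request or response parsing fails.
        if not is_contains_chinese(prompt):
            return prompt
        try:
            return trans_youdao(prompt)
        except (requests.RequestException, KeyError, IndexError, ValueError):
            return prompt

gen() below could then call prompt = maybe_translate(prompt) instead of checking and translating inline.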
  scale=7.5
  steps=100
- i=0
- def gen(prompt, scale, steps):
-     image = pipe(prompt, height=768, width=768,guidance_scale=scale, num_inference_steps=steps).images
+ #i=0
+ #examples = [["An adventurer is approached by a mysterious stranger in the tavern for a new quest."],[]]
+
+ def gen(prompt, input_image, strength, scale, steps):
+
+     if is_contains_chinese(prompt):
+
+         prompt = trans_youdao(prompt)
+
+     print(prompt)
+
+     #print(input_image)
+     if input_image is None:
+         pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16)
+         pipe = pipe.to(device)
+         pipe.enable_attention_slicing()
+         image = pipe(prompt=prompt, height=768, width=768,guidance_scale=scale, num_inference_steps=steps).images
+     else:
+         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16)
+         input_image = Image.fromarray(np.uint8(input_image))
+
+         input_image = input_image.resize((768, 768))
+         pipe = pipe.to(device)
+         pipe.enable_attention_slicing()
+         image = pipe(prompt=prompt, init_image=input_image, strength=strength, guidance_scale=scale, num_inference_steps=steps).images
+
+
      return image[0]
- gr.Interface(fn=gen, inputs=['text', gr.Slider(1, 10, 7.5), gr.Slider(1, maximum=100, value=50, step=1)], outputs='image', title="Stable Diffusion 2.0 ZQL", description="SD 2.0. <b>WARNING:</b> My first implementation of SD2, and COOL.", article = "Code Ape: <a href=\"https://huggingface.co/qianli\">千里马</a>").launch()
+ gr.Interface(fn=gen, inputs=['text', 'image', gr.Slider(0, 1, 0.75), gr.Slider(1, 10, 7.5), gr.Slider(1, maximum=100, value=50, step=1)], outputs='image', title="Bilingual Stable Diffusion 2.0", description="SD 2.0. <b>Welcome:</b> My implementation of SD2 both text and image to image, with both Chinese and English support.", article = "<b>Example</b>: a fabulous mountain view from the window of a luxury hotel with the sun rising in the sky <br> <b>Example</b>: 站在山上看远方的两个人<br> Code Ape: <a href=\"https://huggingface.co/qianli\">千里马</a>").launch(share=True)
+
+
 
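As committed, gen() constructs a fresh pipeline (including a weight download on a cold cache) for every request, and the image-to-image branch passes init_image, a keyword that newer diffusers releases have renamed to image, so it may need updating depending on the pinned version. A minimal sketch, not from this commit, of the usual reuse pattern: both pipelines built once at start-up (the fp16 settings assume a CUDA device) and the model_id, scheduler, device, and translation helpers from the diff reused:

    from PIL import Image
    import numpy as np
    import torch
    from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline

    # Built once at import time rather than inside gen().
    txt2img = StableDiffusionPipeline.from_pretrained(
        model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16
    ).to(device)
    img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
        model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16
    ).to(device)
    txt2img.enable_attention_slicing()
    img2img.enable_attention_slicing()

    def gen(prompt, input_image, strength, scale, steps):
        if is_contains_chinese(prompt):
            prompt = trans_youdao(prompt)
        if input_image is None:
            return txt2img(prompt, height=768, width=768,
                           guidance_scale=scale, num_inference_steps=steps).images[0]
        init = Image.fromarray(np.uint8(input_image)).resize((768, 768))
        # Keyword is "init_image" in older diffusers releases, "image" in newer ones.
        return img2img(prompt, image=init, strength=strength,
                       guidance_scale=scale, num_inference_steps=steps).images[0]

The gr.Interface call from the commit works unchanged with this version, since the function signature is identical.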