Gainward777 committed
Commit 56373b9
1 Parent(s): d3248bd

Update sd/zerogpu_controller.py

Files changed (1):
  sd/zerogpu_controller.py +86 -83
sd/zerogpu_controller.py CHANGED
@@ -1,84 +1,87 @@
- #from ui.gradio_ui import ui
- import spaces
-
- from sd.utils.utils import *
- from utils.utils import sketch_process, prompt_preprocess
- #from sd.sd_controller import Controller
-
- #controller=Controller()
-
- MODELS_NAMES=["cagliostrolab/animagine-xl-3.1",
-               "stabilityai/stable-diffusion-xl-base-1.0"]
- LORA_PATH='sd/lora/lora.safetensors'
- VAE=get_vae()
- CONTROLNET=get_controlnet()
- ADAPTER=get_adapter()
- SCHEDULER=get_scheduler(model_name=MODELS_NAMES[1])
- DETECTOR=get_detector()
-
- FIRST_PIPE=get_pipe(vae=VAE,
-                     model_name=MODELS_NAMES[0],
-                     controlnet=CONTROLNET,
-                     lora_path=LORA_PATH)
-
- SECOND_PIPE=get_pipe(vae=VAE,
-                      model_name=MODELS_NAMES[1],
-                      adapter=ADAPTER,
-                      scheduler=SCHEDULER)
-
-
- @spaces.GPU
- def get_first_result(img, prompt, negative_prompt,
-                      controlnet_scale=0.5, strength=1.0,n_steps=30,eta=1.0):
-
-     substrate, resized_image = sketch_process(img)
-     prompt=prompt_preprocess(prompt)
-
-     FIRST_PIPE.to('cuda')
-
-     result=FIRST_PIPE(image=substrate,
-                       control_image=resized_image,
-                       strength=strength,
-                       prompt=prompt,
-                       negative_prompt = negative_prompt,
-                       controlnet_conditioning_scale=float(controlnet_scale),
-                       generator=torch.manual_seed(0),
-                       num_inference_steps=n_steps,
-                       eta=eta)
-
-     FIRST_PIPE.to('cpu')
-
-     return result.images[0]
-
-
- @spaces.GPU
- def get_second_result(img, prompt, negative_prompt,
-                       g_scale=7.5, n_steps=25,
-                       adapter_scale=0.9, adapter_factor=1.0):
-
-     DETECTOR.to('cuda')
-     SECOND_PIPE.to('cuda')
-
-     preprocessed_img=DETECTOR(img,
-                               detect_resolution=1024,
-                               image_resolution=1024,
-                               apply_filter=True).convert("L")
-
-     result=SECOND_PIPE(prompt=prompt,
-                        negative_prompt=negative_prompt,
-                        image=preprocessed_img,
-                        guidance_scale=g_scale,
-                        num_inference_steps=n_steps,
-                        adapter_conditioning_scale=adapter_scale,
-                        adapter_conditioning_factor=adapter_factor,
-                        generator = torch.manual_seed(42))
-
-     DETECTOR.to('cpu')
-     SECOND_PIPE.to('cpu')
-
-     return result.images[0]
-
-
-
-
+ #from ui.gradio_ui import ui
+ import spaces
+
+ from sd.prompt_helper import helper
+
+ from sd.utils.utils import *
+ from utils.utils import sketch_process, prompt_preprocess
+ #from sd.sd_controller import Controller
+
+ #controller=Controller()
+
+ MODELS_NAMES=["cagliostrolab/animagine-xl-3.1",
+               "stabilityai/stable-diffusion-xl-base-1.0"]
+ LORA_PATH='sd/lora/lora.safetensors'
+ VAE=get_vae()
+ CONTROLNET=get_controlnet()
+ ADAPTER=get_adapter()
+ SCHEDULER=get_scheduler(model_name=MODELS_NAMES[1])
+ DETECTOR=get_detector()
+
+ FIRST_PIPE=get_pipe(vae=VAE,
+                     model_name=MODELS_NAMES[0],
+                     controlnet=CONTROLNET,
+                     lora_path=LORA_PATH)
+
+ SECOND_PIPE=get_pipe(vae=VAE,
+                      model_name=MODELS_NAMES[1],
+                      adapter=ADAPTER,
+                      scheduler=SCHEDULER)
+
+
+ @spaces.GPU
+ def get_first_result(img, prompt, negative_prompt,
+                      controlnet_scale=0.5, strength=1.0,n_steps=30,eta=1.0):
+
+     substrate, resized_image = sketch_process(img)
+     prompt=prompt_preprocess(prompt)
+
+     FIRST_PIPE.to('cuda')
+
+     result=FIRST_PIPE(image=substrate,
+                       control_image=resized_image,
+                       strength=strength,
+                       prompt=prompt,
+                       negative_prompt = negative_prompt,
+                       controlnet_conditioning_scale=float(controlnet_scale),
+                       generator=torch.manual_seed(0),
+                       num_inference_steps=n_steps,
+                       eta=eta)
+
+     FIRST_PIPE.to('cpu')
+
+     return result.images[0]
+
+
+ @spaces.GPU
+ def get_second_result(img, prompt, negative_prompt,
+                       g_scale=7.5, n_steps=25,
+                       adapter_scale=0.9, adapter_factor=1.0):
+
+     DETECTOR.to('cuda')
+     SECOND_PIPE.to('cuda')
+
+     preprocessed_img=DETECTOR(img,
+                               detect_resolution=1024,
+                               image_resolution=1024,
+                               apply_filter=True).convert("L")
+
+     result=SECOND_PIPE(prompt=prompt,
+                        negative_prompt=negative_prompt,
+                        image=preprocessed_img,
+                        guidance_scale=g_scale,
+                        num_inference_steps=n_steps,
+                        adapter_conditioning_scale=adapter_scale,
+                        adapter_conditioning_factor=adapter_factor,
+                        generator = torch.manual_seed(42))
+
+     DETECTOR.to('cpu')
+     SECOND_PIPE.to('cpu')
+
+     return result.images[0]
+
+
+ def get_help_w_prompt(img):
+     return helper.get_help(img)
+
  #ui(get_first_result, get_second_result) #controller)
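
The commented-out ui(get_first_result, get_second_result) call and the ui.gradio_ui import indicate that these functions are consumed by a Gradio front end defined elsewhere in the repository. That UI module is not part of this commit, so the following is only a minimal illustrative sketch of how the three exported functions could be wired into a Gradio Blocks app; the component names, button labels, and the choice to feed the stage-1 output into stage 2 are assumptions, not the project's actual layout.

# Illustrative wiring only -- the real UI lives in ui.gradio_ui and is not shown in this commit.
import gradio as gr

from sd.zerogpu_controller import (get_first_result, get_second_result,
                                   get_help_w_prompt)

with gr.Blocks() as demo:
    sketch   = gr.Image(type="pil", label="Sketch")
    prompt   = gr.Textbox(label="Prompt")
    negative = gr.Textbox(label="Negative prompt")
    result   = gr.Image(label="Result")

    # Suggest a prompt from the sketch via sd.prompt_helper (the helper added in this commit).
    gr.Button("Suggest prompt").click(get_help_w_prompt, inputs=sketch, outputs=prompt)

    # Stage 1: ControlNet img2img pass (animagine-xl-3.1 + LoRA) over the processed sketch.
    gr.Button("Generate").click(get_first_result,
                                inputs=[sketch, prompt, negative],
                                outputs=result)

    # Stage 2: T2I-Adapter pass (SDXL base). Feeding it the stage-1 image is an assumption;
    # this file alone does not show which image the UI actually passes in.
    gr.Button("Refine").click(get_second_result,
                              inputs=[result, prompt, negative],
                              outputs=result)

demo.launch()

On a ZeroGPU Space the @spaces.GPU decorator requests a GPU only for the duration of each decorated call, which is consistent with the explicit .to('cuda') on entry and .to('cpu') before returning in both generation functions.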