ciCic committed on
Commit
b47d425
1 Parent(s): 313a5e9
Files changed (1) hide show
  1. app.py +30 -23
app.py CHANGED
@@ -1,44 +1,51 @@
1
  import gradio as gr
2
- import os
 
 
 
3
 
4
- import torch
5
- from diffusers import AutoPipelineForText2Image, LCMScheduler
6
- from torchvision.transforms.functional import to_pil_image, center_crop, resize, to_tensor
7
 
8
# Inference device for the local diffusers pipeline.
# NOTE(review): weights are loaded as float16 below, which is normally a GPU
# dtype — fp16 on CPU may fail or be very slow; confirm this combination.
device = 'cpu'

# Base Stable Diffusion 1.5-family checkpoint and the LCM-LoRA adapter that
# enables few-step sampling once paired with LCMScheduler.
model_id = "Lykon/dreamshaper-7"
adapter_id = "latent-consistency/lcm-lora-sdv1-5"

pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16)
# pipe = AutoPipelineForText2Image.from_pretrained(model_id)
# Swap in the LCM scheduler (required for the LCM-LoRA fast-sampling path).
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

# load and fuse lcm lora
pipe.load_lora_weights(adapter_id)
pipe.fuse_lora()

# Default prompt pre-filled in the UI textbox.
prompt = "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k"
23
 
24
-
25
@torch.no_grad()
def generate(prompt, guidance_scale, num_inference_steps):
    """Run the module-level diffusers pipeline and return the first image.

    Gradient tracking is disabled since this is inference only.
    """
    result = pipe(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )
    return result.images[0]
29
 
30
 
31
  def app():
32
- return gr.Interface(generate,
33
  [gr.Textbox(
34
  label="Prompt",
35
  info="Enter your prompt",
36
  lines=3,
37
- value=prompt,
38
  ),
39
  gr.Slider(2, 20, value=7.5, label="Guidance Scale",
40
  info="Higher scale depicts more creativity"),
41
- gr.Slider(1, 50, value=4, label="Inference steps")
 
 
42
  ],
43
 
44
  gr.Image(type="pil",
 
1
  import gradio as gr
2
+ import requests
3
+ import base64
4
+ from PIL import Image
5
+ from io import BytesIO
6
 
 
 
 
7
 
8
def decode_base64_image(image_string):
    """Decode a base64-encoded image payload into a PIL Image object."""
    raw_bytes = base64.b64decode(image_string)
    return Image.open(BytesIO(raw_bytes))
12
 
 
 
13
 
14
def inference(prompt, guidance_scale, num_inference_steps):
    """Generate an image for *prompt* via the remote SageMaker inference API.

    Parameters:
        prompt: text prompt forwarded to the model endpoint.
        guidance_scale: classifier-free guidance scale sent to the endpoint.
        num_inference_steps: number of denoising steps sent to the endpoint.

    Returns:
        The decoded PIL image on success, or ``None`` on any request or
        HTTP failure (preserves the original None-on-failure contract).
    """
    api_url = 'https://a02q342s5b.execute-api.us-east-2.amazonaws.com/reinvent-demo-inf2-sm-20231114'

    prompt_input_one = {
        "prompt": prompt,
        "parameters": {
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            "seed": -1
        },
        "endpoint": "huggingface-pytorch-inference-neuronx-2023-11-14-21-22-10-388"
    }

    # requests has NO default timeout: without one, a stalled API gateway
    # would hang this Gradio worker forever. Network errors are mapped to
    # the same None failure signal the HTTP-error path already uses.
    try:
        response_one = requests.post(api_url, json=prompt_input_one, timeout=120)
    except requests.RequestException:
        return None

    if response_one.status_code == 200:
        result_one = response_one.json()
        return decode_base64_image(result_one["generated_images"][0])
    else:
        return None
34
 
35
 
36
  def app():
37
+ return gr.Interface(inference,
38
  [gr.Textbox(
39
  label="Prompt",
40
  info="Enter your prompt",
41
  lines=3,
42
+ value="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
43
  ),
44
  gr.Slider(2, 20, value=7.5, label="Guidance Scale",
45
  info="Higher scale depicts more creativity"),
46
+ gr.Slider(1, 50, value=4, label="Inference steps",
47
+ info="Higher steps the more clarity"
48
+ )
49
  ],
50
 
51
  gr.Image(type="pil",