KawaiiApp committed
Commit 513b0b2
Parent: 8024c78

added stable diff pipeline

Files changed (1):
  handler.py  +15 -4
handler.py CHANGED
@@ -1,5 +1,9 @@
 from typing import Dict, List, Any
 import torch
+import os
+import PIL
+from PIL import Image
+
 from torch import autocast
 from diffusers import StableDiffusionPipeline
 import base64
@@ -13,9 +17,9 @@ if device.type != 'cuda':
     raise ValueError("need to run on GPU")
 
 class EndpointHandler():
-    def __init__(self, path=""):
+    def __init__(self, path="tomriddle/anythinv3-vae"):
         # load the optimized model
-        self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+        self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16, low_cpu_mem_usage=False)
         self.pipe = self.pipe.to(device)
 
 
@@ -27,11 +31,18 @@ class EndpointHandler():
         Return:
             A :obj:`dict`:. base64 encoded image
         """
-        inputs = data.pop("inputs", data)
+        positive_prompt = data.pop("positive_prompt", data)
+        negative_prompt = data.pop("negative_prompt", None)
+        height = data.pop("height", 512)
+        width = data.pop("width", 512)
+        guidance_scale = data.pop("guidance_scale", 7.5)
 
         # run inference pipeline
         with autocast(device.type):
-            image = self.pipe(inputs, guidance_scale=7.5)["sample"][0]
+            if negative_prompt is None:
+                image = self.pipe(prompt=positive_prompt, height=height, width=width, guidance_scale=float(guidance_scale))["sample"][0]
+            else:
+                image = self.pipe(prompt=positive_prompt, negative_prompt=negative_prompt, height=height, width=width, guidance_scale=float(guidance_scale))["sample"][0]
 
         # encode image as base 64
         buffered = BytesIO()
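
For context, a minimal sketch of how the updated handler might be exercised locally. It assumes the standard Hugging Face Inference Endpoints custom-handler contract (EndpointHandler.__call__ receives a plain dict), that the payload keys mirror the data.pop calls above, and that the truncated tail of __call__ returns the generated image as a base64 string under an "image" key; the prompt strings and output filename are illustrative only.

# minimal local smoke test for the handler shown in this diff
# (prompts, the "image" return key, and the output filename are assumptions)
import base64
from io import BytesIO

from PIL import Image

from handler import EndpointHandler

handler = EndpointHandler()  # downloads and loads the model onto the GPU

payload = {
    "positive_prompt": "1girl, cherry blossoms, detailed background",
    "negative_prompt": "lowres, bad anatomy",
    "height": 512,
    "width": 512,
    "guidance_scale": 7.5,
}

result = handler(payload)

# assuming the end of __call__ (not shown above) returns the image as a
# base64 string under an "image" key, decode it back into a PIL image:
image = Image.open(BytesIO(base64.b64decode(result["image"])))
image.save("sample.png")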