ruslanmv committed on
Commit
1711bb3
1 Parent(s): eb3c530

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +31 -11
main.py CHANGED
@@ -1,7 +1,8 @@
1
  run_api = False
2
  is_ssd = False
3
  is_sdxl = False
4
- is_sdxl_turbo=True
 
5
  import os
6
  # Use GPU
7
  gpu_info = os.popen("nvidia-smi").read()
@@ -60,6 +61,9 @@ MAX_SEED = np.iinfo(np.int32).max
60
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
61
  SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")
62
 
 
 
 
63
  # Uncomment the following line if you are using PyTorch 1.10 or later
64
  # os.environ["TORCH_USE_CUDA_DSA"] = "1"
65
 
@@ -129,6 +133,7 @@ if is_sdxl_turbo:
129
  use_cuda=is_gpu
130
  pipe = load_pipeline(use_cuda)
131
 
 
132
  def generate(
133
  prompt: str,
134
  negative_prompt: str = "",
@@ -146,16 +151,31 @@ def generate(
146
 
147
  generator = torch.Generator().manual_seed(seed)
148
 
149
- image = pipe(
150
- prompt=prompt,
151
- negative_prompt=negative_prompt,
152
- width=width,
153
- height=height,
154
- guidance_scale=guidance_scale,
155
- num_inference_steps=num_inference_steps,
156
- generator=generator,
157
- output_type="pil",
158
- ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  return image
160
 
161
 
 
1
  run_api = False
2
  is_ssd = False
3
  is_sdxl = False
4
+ is_sdxl_turbo=False
5
+ use_request=True
6
  import os
7
  # Use GPU
8
  gpu_info = os.popen("nvidia-smi").read()
 
61
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
62
  SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")
63
 
64
+ API_TOKEN = os.environ.get("HF_READ_TOKEN")
65
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
66
+
67
  # Uncomment the following line if you are using PyTorch 1.10 or later
68
  # os.environ["TORCH_USE_CUDA_DSA"] = "1"
69
 
 
133
  use_cuda=is_gpu
134
  pipe = load_pipeline(use_cuda)
135
 
136
+
137
  def generate(
138
  prompt: str,
139
  negative_prompt: str = "",
 
151
 
152
  generator = torch.Generator().manual_seed(seed)
153
 
154
+
155
+ if not use_request:
156
+ image = pipe(
157
+ prompt=prompt,
158
+ negative_prompt=negative_prompt,
159
+ width=width,
160
+ height=height,
161
+ guidance_scale=guidance_scale,
162
+ num_inference_steps=num_inference_steps,
163
+ generator=generator,
164
+ output_type="pil",
165
+ ).images[0]
166
+
167
+ else:
168
+ API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
169
+ payload = {
170
+ "inputs": prompt ,
171
+ "is_negative": negative_prompt,
172
+ "steps": num_inference_steps,
173
+ "cfg_scale": guidance_scale,
174
+ "seed": seed if seed is not None else random.randint(-1, 2147483647)
175
+ }
176
+
177
+ image_bytes = requests.post(API_URL, headers=headers, json=payload).content
178
+ image = Image.open(io.BytesIO(image_bytes))
179
  return image
180
 
181