Amirhustler committed on
Commit 57cf3ee · verified · 1 Parent(s): db655f2

Update app.py

Files changed (1)
  1. app.py +49 -48
app.py CHANGED
@@ -1,48 +1,49 @@
- import io, base64
- from fastapi import FastAPI
- from pydantic import BaseModel
- from PIL import Image
- import torch
-
- from optimum.intel.openvino import OVStableDiffusionPipeline
-
- # Model optimized for CPU (INT8). You can swap it later.
- MODEL_ID = "OpenVINO/stable-diffusion-v1-5-int8-ov"
-
- print("Loading model ...")
- pipe = OVStableDiffusionPipeline.from_pretrained(MODEL_ID)
- pipe.reshape(512, 512)  # keep the input shape fixed => faster on CPU
- pipe.compile()
- print("Model loaded.")
-
- app = FastAPI(title="Txt2Img CPU API")
-
- class Req(BaseModel):
-     prompt: str
-     negative_prompt: str | None = None
-     steps: int = 20
-     guidance: float = 7.5
-     seed: int | None = None
-     width: int = 512
-     height: int = 512
-
- @app.get("/healthz")
- def health():
-     return {"ok": True}
-
- @app.post("/txt2img")
- def txt2img(r: Req):
-     g = torch.Generator(device="cpu")
-     if r.seed is not None:
-         g.manual_seed(r.seed)
-     image = pipe(
-         prompt=r.prompt,
-         negative_prompt=r.negative_prompt,
-         num_inference_steps=r.steps,
-         guidance_scale=r.guidance,
-         width=r.width, height=r.height,
-         generator=g
-     ).images[0]
-     buf = io.BytesIO()
-     image.save(buf, format="PNG")
-     return {"image_base64": base64.b64encode(buf.getvalue()).decode()}
+ import io, base64, os, random
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from PIL import Image
+
+ print(">>> importing optimum.intel.openvino ...")
+ from optimum.intel.openvino import OVStableDiffusionPipeline
+ print(">>> import OK")
+
+ MODEL_ID = os.environ.get("MODEL_ID", "OpenVINO/stable-diffusion-v1-5-int8-ov")
+
+ print("Loading model ...")
+ pipe = OVStableDiffusionPipeline.from_pretrained(MODEL_ID)
+ pipe.reshape(512, 512)  # better for CPU
+ pipe.compile()
+ print("Model loaded.")
+
+ app = FastAPI(title="Txt2Img CPU API")
+
+ class Req(BaseModel):
+     prompt: str
+     negative_prompt: str | None = None
+     steps: int = 20
+     guidance: float = 7.5
+     seed: int | None = None
+     width: int = 512
+     height: int = 512
+
+ @app.get("/healthz")
+ def health():
+     return {"ok": True}
+
+ @app.post("/txt2img")
+ def txt2img(r: Req):
+     # With OpenVINO there is no need for torch.Generator; seed via random instead (optional)
+     if r.seed is not None:
+         random.seed(r.seed)
+
+     image = pipe(
+         prompt=r.prompt,
+         negative_prompt=r.negative_prompt,
+         num_inference_steps=r.steps,
+         guidance_scale=r.guidance,
+         width=r.width, height=r.height,
+     ).images[0]
+
+     buf = io.BytesIO()
+     image.save(buf, format="PNG")
+     return {"image_base64": base64.b64encode(buf.getvalue()).decode()}