Ashley Wright committed
Commit f18e889
0 Parent(s)
.gitmodules ADDED
@@ -0,0 +1,5 @@
+[submodule "sdxl-newdream-20"]
+path = models/models--stablediffusionapi--newdream-sdxl-20/snapshots/4bdd502bca7abd1ea57ee12fba0b0f23052958cc
+url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+branch = main
+
models/models--stablediffusionapi--newdream-sdxl-20/refs/main ADDED
@@ -0,0 +1 @@
+4bdd502bca7abd1ea57ee12fba0b0f23052958cc
models/models--stablediffusionapi--newdream-sdxl-20/snapshots/4bdd502bca7abd1ea57ee12fba0b0f23052958cc ADDED
@@ -0,0 +1 @@
+Subproject commit 4bdd502bca7abd1ea57ee12fba0b0f23052958cc
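The entries above pin the checkpoint both as a git submodule and as a Hugging Face Hub cache entry (models--stablediffusionapi--newdream-sdxl-20/snapshots/<revision>). As a minimal sketch of the same idea without the submodule, assuming the huggingface_hub package is installed, the pinned snapshot could be pre-populated into ./models like this:

from huggingface_hub import snapshot_download

# Download the pinned revision into the ./models cache read by src/pipeline.py.
snapshot_download(
    "stablediffusionapi/newdream-sdxl-20",
    revision="4bdd502bca7abd1ea57ee12fba0b0f23052958cc",
    cache_dir="./models",
)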
pyproject.toml ADDED
@@ -0,0 +1,16 @@
+[build-system]
+requires = ["setuptools >= 61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "edge-maxxing-4090-newdream"
+description = "An edge-maxxing model submission for the 4090 newdream contest"
+requires-python = ">=3.10,<3.11"
+version = "1.0.0"
+dependencies = [
+    # "diffusers==0.29.0",
+    # "transformers==4.41.2",
+    # "accelerate==0.31.0",
+    # "omegaconf==2.3.0",
+    "edge-maxxing-pipelines @ git+ssh://git@github.com/womboai/edge-maxxing@docker-submissions#subdirectory=pipelines",
+]
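The only uncommented dependency is edge-maxxing-pipelines, which provides the pipelines.models.TextToImageRequest model consumed by src/main.py and src/pipeline.py. A rough sketch of building and serializing such a request follows; the field names are taken from src/pipeline.py, while the exact schema and defaults are defined by that package, and pydantic v2 is assumed because main.py calls model_validate_json:

from pipelines.models import TextToImageRequest

# Hypothetical request mirroring the fields read in src/pipeline.py.
request = TextToImageRequest(
    prompt="a watercolor lighthouse at dawn",
    negative_prompt="blurry, low quality",
    width=1024,
    height=1024,
    seed=42,
)

payload = request.model_dump_json().encode("utf-8")  # the wire format expected by src/main.py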
src/main.py ADDED
@@ -0,0 +1,39 @@
+from io import BytesIO
+from socket import socket, AF_UNIX, SOCK_STREAM
+from sys import byteorder
+
+from pipelines.models import TextToImageRequest
+
+from pipeline import load_pipeline, infer
+
+SOCKET = "/sandbox/inferences.sock"
+
+
+def main():
+    pipeline = load_pipeline()
+
+    with socket(AF_UNIX, SOCK_STREAM) as inference_socket:
+        inference_socket.bind(SOCKET)
+
+        inference_socket.listen(1)
+        connection, _ = inference_socket.accept()
+
+        with connection:
+            while True:
+                size = int.from_bytes(connection.recv(2), byteorder)
+
+                request = TextToImageRequest.model_validate_json(connection.recv(size).decode("utf-8"))
+
+                image = infer(request, pipeline)
+
+                data = BytesIO()
+                image.save(data, format=image.format)
+
+                packet = data.getvalue()
+
+                connection.send(len(packet).to_bytes(4, byteorder))
+                connection.send(packet)
+
+
+if __name__ == '__main__':
+    main()
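src/main.py speaks a small length-prefixed protocol over the Unix socket at /sandbox/inferences.sock: the client sends a 2-byte native-endian length followed by a JSON-encoded TextToImageRequest, and receives a 4-byte length followed by the encoded image bytes. A minimal client sketch under those assumptions; the framing and socket path come from the file above, while the chunked receive loop is an addition:

from io import BytesIO
from socket import socket, AF_UNIX, SOCK_STREAM
from sys import byteorder

from PIL import Image
from pipelines.models import TextToImageRequest

SOCKET = "/sandbox/inferences.sock"


def generate(request: TextToImageRequest) -> Image.Image:
    payload = request.model_dump_json().encode("utf-8")

    with socket(AF_UNIX, SOCK_STREAM) as client:
        client.connect(SOCKET)

        # 2-byte length prefix, then the JSON request (mirrors src/main.py).
        client.send(len(payload).to_bytes(2, byteorder))
        client.send(payload)

        # 4-byte length prefix, then the encoded image bytes.
        size = int.from_bytes(client.recv(4), byteorder)

        data = BytesIO()
        while data.getbuffer().nbytes < size:
            data.write(client.recv(size - data.getbuffer().nbytes))

        data.seek(0)
        return Image.open(data)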
src/pipeline.py ADDED
@@ -0,0 +1,29 @@
+from PIL.Image import Image
+from diffusers import StableDiffusionXLPipeline
+from pipelines.models import TextToImageRequest
+from torch import Generator
+
+
+def load_pipeline() -> StableDiffusionXLPipeline:
+    pipeline = StableDiffusionXLPipeline.from_pretrained(
+        "stablediffusionapi/newdream-sdxl-20",
+        revision="4bdd502bca7abd1ea57ee12fba0b0f23052958cc",
+        cache_dir="./models",
+        local_files_only=True,
+    ).to("cuda")
+
+    pipeline(prompt="")
+
+    return pipeline
+
+
+def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+    generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
+
+    return pipeline(
+        prompt=request.prompt,
+        negative_prompt=request.negative_prompt,
+        width=request.width,
+        height=request.height,
+        generator=generator,
+    ).images[0]
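For a quick check outside the socket server, load_pipeline and infer can be driven directly. A minimal sketch, assuming a CUDA device, the pinned checkpoint already present under ./models, and the TextToImageRequest fields referenced above:

from pipelines.models import TextToImageRequest

from pipeline import load_pipeline, infer

# Loads the pinned SDXL checkpoint from ./models onto the GPU and warms it up.
pipeline = load_pipeline()

# Hypothetical request; the exact schema is defined by edge-maxxing-pipelines.
request = TextToImageRequest(
    prompt="an astronaut riding a horse on mars, highly detailed",
    negative_prompt="low quality",
    width=1024,
    height=1024,
    seed=42,
)

image = infer(request, pipeline)
image.save("out.png")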