silencer107 committed
Commit 24a8de9 (parent: 7027416)

Upload folder using huggingface_hub

Files changed (31)
  1. .gitmodules +4 -0
  2. README.md +12 -0
  3. models/newdream-sdxl-20/.gitattributes +35 -0
  4. models/newdream-sdxl-20/.gitignore +2 -0
  5. models/newdream-sdxl-20/README.md +66 -0
  6. models/newdream-sdxl-20/model_index.json +33 -0
  7. models/newdream-sdxl-20/scheduler/scheduler_config.json +18 -0
  8. models/newdream-sdxl-20/text_encoder/config.json +24 -0
  9. models/newdream-sdxl-20/text_encoder/model.safetensors +3 -0
  10. models/newdream-sdxl-20/text_encoder/pytorch_model.bin +3 -0
  11. models/newdream-sdxl-20/text_encoder_2/config.json +24 -0
  12. models/newdream-sdxl-20/text_encoder_2/model.safetensors +3 -0
  13. models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin +3 -0
  14. models/newdream-sdxl-20/tokenizer/merges.txt +0 -0
  15. models/newdream-sdxl-20/tokenizer/special_tokens_map.json +24 -0
  16. models/newdream-sdxl-20/tokenizer/tokenizer_config.json +33 -0
  17. models/newdream-sdxl-20/tokenizer/vocab.json +0 -0
  18. models/newdream-sdxl-20/tokenizer_2/merges.txt +0 -0
  19. models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json +24 -0
  20. models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json +33 -0
  21. models/newdream-sdxl-20/tokenizer_2/vocab.json +0 -0
  22. models/newdream-sdxl-20/unet/config.json +71 -0
  23. models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin +3 -0
  24. models/newdream-sdxl-20/unet/diffusion_pytorch_model.safetensors +3 -0
  25. models/newdream-sdxl-20/vae/config.json +31 -0
  26. models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin +3 -0
  27. models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors +3 -0
  28. pyproject.toml +27 -0
  29. requirements.txt +1 -0
  30. src/main.py +50 -0
  31. src/pipeline.py +54 -0
.gitmodules ADDED
@@ -0,0 +1,4 @@
+ [submodule "newdream-sdxl-20"]
+ path = models/newdream-sdxl-20
+ url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+ branch = main
README.md ADDED
@@ -0,0 +1,12 @@
+ # edge-maxxing-newdream-sdxl
+ 
+ This holds the baseline for the SDXL NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+ 
+ Some recommendations are as follows:
+ - Declare dependencies, including git dependencies, in `pyproject.toml`
+ - Include compiled models directly in the repository (rather than compiling them at load time); loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it mostly contains protocol logic. Most changes should go in `models` and `src/pipeline.py`
+ - Change `requirements.txt` to add extra arguments to be used when installing the package
+ 
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ Install the listed dependencies with `pip install -r requirements.txt -e .`, then run `start_inference`.
models/newdream-sdxl-20/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/newdream-sdxl-20/.gitignore ADDED
@@ -0,0 +1,2 @@
+ step_*
+ epoch_*
models/newdream-sdxl-20/README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - stablediffusionapi.com
+ - stable-diffusion-api
+ - text-to-image
+ - ultra-realistic
+ pinned: true
+ ---
+ 
+ # NewDream-SDXL 2.0 API Inference
+ 
+ ![generated from stablediffusionapi.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/8478583971702167737.png)
+ ## Get API Key
+ 
+ Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed.
+ 
+ Replace Key in below code, change **model_id** to "newdream-sdxl-20"
+ 
+ Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
+ 
+ Try model for free: [Generate Images](https://stablediffusionapi.com/models/newdream-sdxl-20)
+ 
+ Model link: [View model](https://stablediffusionapi.com/models/newdream-sdxl-20)
+ 
+ Credits: [View credits](https://civitai.com/?query=NewDream-SDXL%202.0)
+ 
+ View all models: [View Models](https://stablediffusionapi.com/models)
+ 
+ import requests
+ import json
+ 
+ url = "https://stablediffusionapi.com/api/v4/dreambooth"
+ 
+ payload = json.dumps({
+   "key": "your_api_key",
+   "model_id": "newdream-sdxl-20",
+   "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
+   "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
+   "width": "512",
+   "height": "512",
+   "samples": "1",
+   "num_inference_steps": "30",
+   "safety_checker": "no",
+   "enhance_prompt": "yes",
+   "seed": None,
+   "guidance_scale": 7.5,
+   "multi_lingual": "no",
+   "panorama": "no",
+   "self_attention": "no",
+   "upscale": "no",
+   "embeddings": "embeddings_model_id",
+   "lora": "lora_model_id",
+   "webhook": None,
+   "track_id": None
+ })
+ 
+ headers = {
+   'Content-Type': 'application/json'
+ }
+ 
+ response = requests.request("POST", url, headers=headers, data=payload)
+ 
+ print(response.text)
+ 
+ > Use this coupon code to get 25% off **DMGG0RBN**
models/newdream-sdxl-20/model_index.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_class_name": "StableDiffusionXLPipeline",
+   "_diffusers_version": "0.21.2",
+   "force_zeros_for_empty_prompt": true,
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "text_encoder_2": [
+     "transformers",
+     "CLIPTextModelWithProjection"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
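This `model_index.json` is what `diffusers` reads to assemble the pipeline from the component subfolders above. Below is a minimal sketch (not part of this commit) of loading the folder locally and inspecting those components, assuming the `models/newdream-sdxl-20` submodule has been checked out:

```python
# Minimal sketch: load the folder described by model_index.json with diffusers.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "models/newdream-sdxl-20",   # local folder containing model_index.json
    torch_dtype=torch.float16,   # half precision, matching the stored fp16 weights
    use_safetensors=True,        # prefer the .safetensors files over the .bin copies
    local_files_only=True,
)

# model_index.json maps each component name to a (library, class) pair; the
# loaded pipeline exposes the instantiated objects under the same names.
print({name: type(component).__name__ for name, component in pipe.components.items()})
```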
models/newdream-sdxl-20/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.21.2",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "timestep_spacing": "leading",
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
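This file ships the stock SDXL `EulerDiscreteScheduler` settings; `src/pipeline.py` later swaps it for a DPM-Solver scheduler built from the same config. A minimal sketch (not part of this commit) of both steps, assuming the model folder is available locally:

```python
# Minimal sketch: rebuild the scheduler from this subfolder, then derive an
# alternative scheduler from the same config, as src/pipeline.py does.
from diffusers import DPMSolverMultistepScheduler, EulerDiscreteScheduler

euler = EulerDiscreteScheduler.from_pretrained(
    "models/newdream-sdxl-20", subfolder="scheduler"
)
dpm = DPMSolverMultistepScheduler.from_config(euler.config)
```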
models/newdream-sdxl-20/text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.1",
+   "vocab_size": 49408
+ }
models/newdream-sdxl-20/text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57f094c002b6f50986b68a714285e72a23c17d9e1b146b078a2219397c51e37a
+ size 246144152
models/newdream-sdxl-20/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:192a06f4ef7ece4acb33fc3c717790ee37b1f5d85e48e0dcac54dfea93e584a2
+ size 246185562
models/newdream-sdxl-20/text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "CLIPTextModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 32,
+   "pad_token_id": 1,
+   "projection_dim": 1280,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.1",
+   "vocab_size": 49408
+ }
models/newdream-sdxl-20/text_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a0d55a94e8508c869f35163fb6fcf34e02ea1b614d9259b47f97c562cff9575
+ size 1389382176
models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8841d5e4c05ce74941eb536ec3835f600cc82d36763fc5f30c69d09a886158c9
+ size 1389490462
models/newdream-sdxl-20/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "!",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/unet/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.21.2",
+   "act_fn": "silu",
+   "addition_embed_type": "text_time",
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": 256,
+   "attention_head_dim": [
+     5,
+     10,
+     20
+   ],
+   "attention_type": "default",
+   "block_out_channels": [
+     320,
+     640,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 2048,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 2816,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 128,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": [
+     1,
+     2,
+     10
+   ],
+   "up_block_types": [
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": true
+ }
models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56913b74b493462516ce770d525cca996feef7acd44cf0cba65de667a6c1d58c
+ size 5135669022
models/newdream-sdxl-20/unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9715fb3c6d0d81f46fbe6be46ddce05d91806aaf041504a52a44e1ec9e4f660f
+ size 5135149760
models/newdream-sdxl-20/vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.21.2",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.13025,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33c40ff3dc7adeb21dce76cd411d65828037efa0aa54432e3592418401cf8467
+ size 167404866
models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:915b909d0eeef5985462226b2c9950ca9da42b5a6ec8c296c2e73f6419ae465c
+ size 167335342
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [build-system]
+ requires = ["setuptools >= 61.0"]
+ build-backend = "setuptools.build_meta"
+ 
+ [project]
+ name = "edge-maxxing-4090-newdream"
+ description = "An edge-maxxing model submission for the 4090 newdream contest"
+ requires-python = ">=3.10,<3.11"
+ version = "1.0.0"
+ dependencies = [
+     "diffusers==0.30.2",
+     "transformers==4.41.2",
+     "accelerate==0.31.0",
+     "omegaconf==2.3.0",
+     "tgate==1.0.0",
+     "DeepCache",
+     "PEFT",
+     "numpy==1.26.4",
+     "xformers==0.0.25.post1",
+     "triton==2.2.0",
+     "torch==2.2.2",
+     "torchvision==0.17.2",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
+ ]
+ 
+ [project.scripts]
+ start_inference = "main:main"
requirements.txt ADDED
@@ -0,0 +1 @@
+ https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+ 
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ 
+ from pipeline import load_pipeline, infer
+ 
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+ 
+ 
+ def main():
+     print(f"Loading pipeline")
+     pipeline = load_pipeline()
+ 
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+ 
+     if exists(SOCKET):
+         remove(SOCKET)
+ 
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+ 
+         print(f"Awaiting connections")
+         with listener.accept() as connection:
+             print(f"Connected")
+ 
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print(f"Inference socket exiting")
+ 
+                     return
+ 
+                 image = infer(request, pipeline)
+ 
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+ 
+                 packet = data.getvalue()
+ 
+                 connection.send_bytes(packet)
+ 
+ 
+ if __name__ == '__main__':
+     main()
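`src/main.py` is the server side of a small socket protocol: it reads a JSON-encoded `TextToImageRequest` from `inferences.sock` and writes raw JPEG bytes back. Below is a hypothetical client-side sketch (not part of this commit) for local testing; the request field beyond `prompt` and the relative socket path are assumptions based on `infer` and `SOCKET` above, and the serialization mirrors the pydantic v2 `model_validate_json` call the server uses.

```python
# Hypothetical client sketch: connect to the socket served by src/main.py,
# send one request, and save the JPEG bytes that come back.
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

# "inferences.sock" matches SOCKET in main.py when run from the repository root.
with Client("inferences.sock") as connection:
    request = TextToImageRequest(prompt="a watercolor fox in a snowy forest")

    # main.py parses the payload with TextToImageRequest.model_validate_json,
    # so the client serializes with the matching pydantic v2 call.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))

    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("sample.jpg")
```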
src/pipeline.py ADDED
@@ -0,0 +1,54 @@
+ import torch
+ from time import perf_counter
+ from PIL.Image import Image
+ from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler, AutoencoderTiny, UNet2DConditionModel
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from sfast.compilers.diffusion_pipeline_compiler import (compile,
+                                                          CompilationConfig)
+ 
+ 
+ 
+ def load_pipeline() -> StableDiffusionXLPipeline:
+     pipeline = StableDiffusionXLPipeline.from_pretrained(
+         "./models/newdream-sdxl-20/",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+         variant="fp16",
+         local_files_only=True,)
+     pipeline.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
+     pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
+         pipeline.scheduler.config)
+     pipeline.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
+     pipeline.to("cuda")
+ 
+     config = CompilationConfig.Default()
+ 
+     try:
+         import xformers
+         config.enable_xformers = True
+     except ImportError:
+         print('xformers not installed, skip')
+     try:
+         import triton
+         config.enable_triton = True
+     except ImportError:
+         print('Triton not installed, skip')
+ 
+     pipeline = compile(pipeline, config)
+     for _ in range(4):
+         pipeline(prompt="", num_inference_steps=15,)
+ 
+     return pipeline
+ 
+ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+     generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
+ 
+     return pipeline(
+         prompt=request.prompt,
+         negative_prompt=request.negative_prompt,
+         width=request.width,
+         height=request.height,
+         generator=generator,
+         num_inference_steps=11,
+     ).images[0]
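Since the contest README above stresses that loading time matters far more than file size, a rough way to sanity-check a change is to time `load_pipeline` and a single `infer` call. Below is a minimal benchmark sketch (not part of this commit), assuming it is run from the repository root with `src` on `PYTHONPATH`, the dependencies installed, and that `TextToImageRequest` provides defaults for the fields `infer` reads besides `prompt`; the contest harness may measure things differently.

```python
# Minimal local benchmark sketch reusing the repository's own entry points.
from time import perf_counter

from pipelines.models import TextToImageRequest

from pipeline import load_pipeline, infer

start = perf_counter()
pipeline = load_pipeline()  # includes stable-fast compilation and the warm-up passes
print(f"load: {perf_counter() - start:.1f}s")

request = TextToImageRequest(prompt="a lighthouse at dusk, oil painting")  # hypothetical request

start = perf_counter()
image = infer(request, pipeline)  # 11-step DPM-Solver run on the compiled pipeline
print(f"inference: {perf_counter() - start:.2f}s")
image.save("benchmark.jpg")
```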