adhikjoshi committed
Commit 1ca37b7 (parent 9422209)

converted using stablediffusionapi.com

README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - stablediffusionapi.com
+ - stable-diffusion-api
+ - text-to-image
+ - ultra-realistic
+ pinned: true
+ ---
+
+ # wand-magic2 API Inference
+
+ ![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/10062657571680098332.png)
+
+ ## Get API Key
+
+ Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment is needed.
+
+ Replace the key in the code below, and change **model_id** to "wand-magic2".
+
+ Coding in PHP, Node, Java, etc.? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
+
+ Try the model for free: [Generate Images](https://stablediffusionapi.com/models/wand-magic2)
+
+ Model link: [View model](https://stablediffusionapi.com/models/wand-magic2)
+
+ Credits: [View credits](https://civitai.com/?query=wand-magic2)
+
+ View all models: [View Models](https://stablediffusionapi.com/models)
+
+ ```python
+ import requests
+ import json
+
+ url = "https://stablediffusionapi.com/api/v3/dreambooth"
+
+ payload = json.dumps({
+     "key": "your_api_key",
+     "model_id": "wand-magic2",
+     "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
+     "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
+     "width": "512",
+     "height": "512",
+     "samples": "1",
+     "num_inference_steps": "30",
+     "safety_checker": "no",
+     "enhance_prompt": "yes",
+     "seed": None,
+     "guidance_scale": 7.5,
+     "multi_lingual": "no",
+     "panorama": "no",
+     "self_attention": "no",
+     "upscale": "no",
+     "embeddings": "embeddings_model_id",
+     "lora": "lora_model_id",
+     "webhook": None,
+     "track_id": None
+ })
+
+ headers = {
+     "Content-Type": "application/json"
+ }
+
+ # POST the generation request and print the raw JSON response.
+ response = requests.post(url, headers=headers, data=payload)
+
+ print(response.text)
+ ```
+
+ > Use this coupon code to get 25% off **DMGG0RBN**
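
The endpoint responds with JSON rather than raw image bytes. As a minimal sketch of handling the response (assuming, per the stablediffusionapi docs, a body with a `status` field and an `output` list of generated-image URLs; neither field name is stated in this card):

```python
import requests

# Continues from the request above: `response` is the result of the POST.
result = response.json()

if result.get("status") == "success":
    # "output" is assumed to hold URLs of the generated images.
    for i, image_url in enumerate(result["output"]):
        img = requests.get(image_url)
        with open(f"wand-magic2_{i}.png", "wb") as f:
            f.write(img.content)
else:
    # Queued generations may report another status (e.g. "processing");
    # inspect the raw body in that case.
    print(result)
```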
model_index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "StableDiffusionPipeline",
-  "_diffusers_version": "0.15.0.dev0",
+  "_diffusers_version": "0.16.1",
   "feature_extractor": [
     "transformers",
     "CLIPFeatureExtractor"
safety_checker/config.json CHANGED
@@ -80,7 +80,7 @@
   "top_p": 1.0,
   "torch_dtype": null,
   "torchscript": false,
-  "transformers_version": "4.27.2",
+  "transformers_version": "4.28.1",
   "typical_p": 1.0,
   "use_bfloat16": false,
   "vocab_size": 49408
@@ -161,7 +161,7 @@
   "top_p": 1.0,
   "torch_dtype": null,
   "torchscript": false,
-  "transformers_version": "4.27.2",
+  "transformers_version": "4.28.1",
   "typical_p": 1.0,
   "use_bfloat16": false
 }
safety_checker/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
-size 1216064769
+oid sha256:7ba98d19732abb6b1a90fc030f61430678f9b48f5aae79be5de8e005107c8563
+size 1216065214
scheduler/scheduler_config.json CHANGED
@@ -1,14 +1,18 @@
 {
   "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.15.0.dev0",
+  "_diffusers_version": "0.16.1",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
   "clip_sample": false,
+  "clip_sample_range": 1.0,
+  "dynamic_thresholding_ratio": 0.995,
   "num_train_timesteps": 1000,
   "prediction_type": "epsilon",
+  "sample_max_value": 1.0,
   "set_alpha_to_one": false,
   "skip_prk_steps": true,
   "steps_offset": 1,
+  "thresholding": false,
   "trained_betas": null
 }
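
The four new keys (`clip_sample_range`, `dynamic_thresholding_ratio`, `sample_max_value`, `thresholding`) are defaults that newer diffusers releases serialize for `PNDMScheduler`; with `thresholding` false they leave sampling behavior unchanged. A quick sketch to confirm the updated config loads (the repo id `stablediffusionapi/wand-magic2` is an assumption inferred from the model name):

```python
from diffusers import PNDMScheduler

# Load the scheduler from this repo's scheduler/ subfolder;
# the repo id is an assumption, not stated in the diff.
scheduler = PNDMScheduler.from_pretrained(
    "stablediffusionapi/wand-magic2", subfolder="scheduler"
)
print(scheduler.config.thresholding)   # False: thresholding stays disabled
print(scheduler.config.beta_schedule)  # "scaled_linear", unchanged
```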
text_encoder/config.json CHANGED
@@ -20,6 +20,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.27.2",
+  "transformers_version": "4.28.1",
   "vocab_size": 49408
 }
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40325f6f3bcdaf9306d504d9a49a731bb0db714b0afe1710eac102f45fc19aa7
-size 492307041
+oid sha256:44a924f8476f2957dde4cf5fefaf4eb14d534134890ae327b5734926b4f4c7d0
+size 492307486
tokenizer/tokenizer_config.json CHANGED
@@ -8,6 +8,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "clean_up_tokenization_spaces": true,
   "do_lower_case": true,
   "eos_token": {
     "__type": "AddedToken",
@@ -20,7 +21,6 @@
   "errors": "replace",
   "model_max_length": 77,
   "pad_token": "<|endoftext|>",
-  "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
   "unk_token": {
     "__type": "AddedToken",
unet/config.json CHANGED
@@ -1,7 +1,9 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.15.0.dev0",
+  "_diffusers_version": "0.16.1",
   "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
   "attention_head_dim": 8,
   "block_out_channels": [
     320,
@@ -11,9 +13,11 @@
   ],
   "center_input_sample": false,
   "class_embed_type": null,
+  "class_embeddings_concat": false,
   "conv_in_kernel": 3,
   "conv_out_kernel": 3,
   "cross_attention_dim": 768,
+  "cross_attention_norm": null,
   "down_block_types": [
     "CrossAttnDownBlock2D",
     "CrossAttnDownBlock2D",
@@ -22,10 +26,12 @@
   ],
   "downsample_padding": 1,
   "dual_cross_attention": false,
+  "encoder_hid_dim": null,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
   "in_channels": 4,
   "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
   "mid_block_scale_factor": 1,
   "mid_block_type": "UNetMidBlock2DCrossAttn",
   "norm_eps": 1e-05,
@@ -34,9 +40,13 @@
   "only_cross_attention": false,
   "out_channels": 4,
   "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
   "resnet_time_scale_shift": "default",
   "sample_size": 64,
   "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
   "time_embedding_type": "positional",
   "timestep_post_act": null,
   "up_block_types": [
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:526e851625440c961ee11c77634cf1905c794d77548dc4e29b549523c03123cb
-size 3438366373
+oid sha256:c87b31c355489abcbed3ab31311df8ebd3f2cb3cd8125a4e2348b848eeef9b45
+size 3438366838
vae/config.json CHANGED
@@ -1,7 +1,6 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.15.0.dev0",
-  "_name_or_path": "stabilityai/sd-vae-ft-mse",
+  "_diffusers_version": "0.16.1",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -20,7 +19,7 @@
   "layers_per_block": 2,
   "norm_num_groups": 32,
   "out_channels": 3,
-  "sample_size": 256,
+  "sample_size": 512,
   "scaling_factor": 0.18215,
   "up_block_types": [
     "UpDecoderBlock2D",
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:36bb8e1b54aba3a0914eb35fba13dcb107e9f18d379d1df2158732cd4bf56a94
-size 334711857
+oid sha256:9d3af2790f491ba035286a801bf72f7e59c8dd35d05de4563587cbcb0290982f
+size 334712322
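
Since `model_index.json` still declares a stock `StableDiffusionPipeline`, the converted weights should also load directly with diffusers for local inference, independent of the hosted API. A minimal sketch (assuming the repo id `stablediffusionapi/wand-magic2`, inferred from the model name, and a CUDA GPU):

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id is an assumption inferred from the model name in the README.
pipe = StableDiffusionPipeline.from_pretrained(
    "stablediffusionapi/wand-magic2",
    torch_dtype=torch.float16,
).to("cuda")

# Mirror the README payload: 30 steps, guidance scale 7.5.
image = pipe(
    "ultra realistic close up portrait, cinematic lighting, 8K",
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
image.save("wand-magic2.png")
```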