adhikjoshi committed on
Commit 2f78903
Parent: d8d6500

converted using stablediffusionapi.com

README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - stablediffusionapi.com
+ - stable-diffusion-api
+ - text-to-image
+ - ultra-realistic
+ pinned: true
+ ---
+
+ # Realistic Vision API Inference
+
+ ![generated from stablediffusionapi.com](https://cdn2.stablediffusionapi.com/generations/8431509861693456971.png)
+
+ ## Get API Key
+
+ Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment is needed.
+
+ Replace the key in the code below and change **model_id** to "realistic-vision".
+
+ Coding in PHP/Node/Java etc.? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
+
+ Try the model for free: [Generate Images](https://stablediffusionapi.com/models/realistic-vision)
+
+ Model link: [View model](https://stablediffusionapi.com/models/realistic-vision)
+
+ Credits: [View credits](https://civitai.com/?query=realistic%20vision)
+
+ View all models: [View Models](https://stablediffusionapi.com/models)
+
+ ```python
+ import requests
+ import json
+
+ url = "https://stablediffusionapi.com/api/v4/dreambooth"
+
+ payload = json.dumps({
+     "key": "your_api_key",
+     "model_id": "realistic-vision",
+     "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
+     "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
+     "width": "512",
+     "height": "512",
+     "samples": "1",
+     "num_inference_steps": "30",
+     "safety_checker": "no",
+     "enhance_prompt": "yes",
+     "seed": None,
+     "guidance_scale": 7.5,
+     "multi_lingual": "no",
+     "panorama": "no",
+     "self_attention": "no",
+     "upscale": "no",
+     "embeddings": "embeddings_model_id",
+     "lora": "lora_model_id",
+     "webhook": None,
+     "track_id": None
+ })
+
+ headers = {
+     'Content-Type': 'application/json'
+ }
+
+ response = requests.request("POST", url, headers=headers, data=payload)
+
+ print(response.text)
+ ```
+
+ > Use this coupon code to get 25% off **DMGG0RBN**
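The README example just prints the raw response body. One way to pull out the generated image URLs is sketched below — the `status` and `output` field names are assumptions about the response schema, so verify them against [the API docs](https://stablediffusionapi.com/docs):

```python
# Hedged sketch: "status" and "output" are assumed response fields;
# confirm the exact schema in the stablediffusionapi.com docs.
data = response.json()
if data.get("status") == "success":
    for image_url in data.get("output", []):
        print(image_url)
else:
    print("Generation failed or still processing:", data)
```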
feature_extractor/preprocessor_config.json CHANGED
@@ -14,7 +14,7 @@
     0.4578275,
     0.40821073
   ],
-  "image_processor_type": "CLIPImageProcessor",
+  "image_processor_type": "CLIPFeatureExtractor",
   "image_std": [
     0.26862954,
     0.26130258,
model_index.json CHANGED
@@ -1,9 +1,9 @@
 {
   "_class_name": "StableDiffusionPipeline",
-  "_diffusers_version": "0.13.0.dev0",
+  "_diffusers_version": "0.20.0.dev0",
   "feature_extractor": [
     "transformers",
-    "CLIPImageProcessor"
+    "CLIPFeatureExtractor"
   ],
   "requires_safety_checker": true,
   "safety_checker": [
safety_checker/config.json CHANGED
@@ -80,17 +80,11 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.25.1",
+    "transformers_version": "4.28.1",
     "typical_p": 1.0,
     "use_bfloat16": false,
     "vocab_size": 49408
   },
-  "text_config_dict": {
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "num_attention_heads": 12,
-    "num_hidden_layers": 12
-  },
   "torch_dtype": "float32",
   "transformers_version": null,
   "vision_config": {
@@ -167,15 +161,8 @@
     "top_p": 1.0,
     "torch_dtype": null,
     "torchscript": false,
-    "transformers_version": "4.25.1",
+    "transformers_version": "4.28.1",
     "typical_p": 1.0,
     "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "hidden_size": 1024,
-    "intermediate_size": 4096,
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14
   }
 }
safety_checker/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16d28f2b37109f222cdc33620fdd262102ac32112be0352a7f77e9614b35a394
-size 1216064769
+oid sha256:7ba98d19732abb6b1a90fc030f61430678f9b48f5aae79be5de8e005107c8563
+size 1216065214
scheduler/scheduler_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "PNDMScheduler",
-  "_diffusers_version": "0.13.0.dev0",
+  "_diffusers_version": "0.20.0.dev0",
   "beta_end": 0.012,
   "beta_schedule": "scaled_linear",
   "beta_start": 0.00085,
@@ -10,5 +10,6 @@
   "set_alpha_to_one": false,
   "skip_prk_steps": true,
   "steps_offset": 1,
+  "timestep_spacing": "leading",
   "trained_betas": null
 }
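The new `timestep_spacing` field is consumed by newer diffusers releases when the scheduler is built from this config. A small sketch of loading it on its own (repo id again assumed):

```python
from diffusers import PNDMScheduler

# Repo id is an assumption for illustration.
scheduler = PNDMScheduler.from_pretrained(
    "stablediffusionapi/realistic-vision", subfolder="scheduler"
)
print(scheduler.config.timestep_spacing)  # "leading"
```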
text_encoder/config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_name_or_path": "openai/clip-vit-large-patch14",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -20,6 +19,6 @@
   "pad_token_id": 1,
   "projection_dim": 768,
   "torch_dtype": "float32",
-  "transformers_version": "4.25.1",
+  "transformers_version": "4.28.1",
   "vocab_size": 49408
 }
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:742bf042a0f6325b293d96cbd9fdc5992adac928bf32dd68f57db24ce74a0a50
-size 492307041
+oid sha256:d5e8fe0598d0f88c3d88ae443c2060a879bc7bef007b75c9770462605dfe5e3f
+size 492307486
tokenizer/tokenizer_config.json CHANGED
@@ -8,6 +8,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "clean_up_tokenization_spaces": true,
   "do_lower_case": true,
   "eos_token": {
     "__type": "AddedToken",
@@ -19,9 +20,7 @@
   },
   "errors": "replace",
   "model_max_length": 77,
-  "name_or_path": "openai/clip-vit-large-patch14",
   "pad_token": "<|endoftext|>",
-  "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
   "unk_token": {
     "__type": "AddedToken",
unet/config.json CHANGED
@@ -1,7 +1,10 @@
 {
   "_class_name": "UNet2DConditionModel",
-  "_diffusers_version": "0.13.0.dev0",
+  "_diffusers_version": "0.20.0.dev0",
   "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
   "attention_head_dim": 8,
   "block_out_channels": [
     320,
@@ -11,7 +14,11 @@
   ],
   "center_input_sample": false,
   "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
   "cross_attention_dim": 768,
+  "cross_attention_norm": null,
   "down_block_types": [
     "CrossAttnDownBlock2D",
     "CrossAttnDownBlock2D",
@@ -20,19 +27,32 @@
   ],
   "downsample_padding": 1,
   "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
   "flip_sin_to_cos": true,
   "freq_shift": 0,
   "in_channels": 4,
   "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
   "mid_block_scale_factor": 1,
   "mid_block_type": "UNetMidBlock2DCrossAttn",
   "norm_eps": 1e-05,
   "norm_num_groups": 32,
+  "num_attention_heads": null,
   "num_class_embeds": null,
   "only_cross_attention": false,
   "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
   "resnet_time_scale_shift": "default",
   "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
   "up_block_types": [
     "UpBlock2D",
     "CrossAttnUpBlock2D",
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81657ef0bf8153186a5e900adbeb175e73e5695ee65b44addec7ab5d55b53327
-size 3438366373
+oid sha256:5235af98664d312048812a22ebf58ef50c1289a73eeb0cd2d628ec9758314644
+size 3438366838
vae/config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.13.0.dev0",
+  "_diffusers_version": "0.20.0.dev0",
   "act_fn": "silu",
   "block_out_channels": [
     128,
@@ -14,6 +14,7 @@
     "DownEncoderBlock2D",
     "DownEncoderBlock2D"
   ],
+  "force_upcast": true,
   "in_channels": 3,
   "latent_channels": 4,
   "layers_per_block": 2,
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dcf4507d99b88db73f3916e2a20169fe74ada6b5582e9af56cfa80f5f3141765
-size 334711857
+oid sha256:0efa237bb22bfbb62a755d1b492123dffcb622323671f14856f6d85817bfa33c
+size 334712578