Azher committed on
Commit
acc558b
1 Parent(s): 83f8e02

Upload 12 files

Browse files
model_index.json CHANGED
@@ -1,14 +1,14 @@
1
  {
2
  "_class_name": "StableDiffusionPipeline",
3
- "_diffusers_version": "0.13.0.dev0",
4
  "feature_extractor": [
5
- "transformers",
6
- "CLIPFeatureExtractor"
7
  ],
8
- "requires_safety_checker": true,
9
  "safety_checker": [
10
- "stable_diffusion",
11
- "StableDiffusionSafetyChecker"
12
  ],
13
  "scheduler": [
14
  "diffusers",
 
1
  {
2
  "_class_name": "StableDiffusionPipeline",
3
+ "_diffusers_version": "0.12.1",
4
  "feature_extractor": [
5
+ null,
6
+ null
7
  ],
8
+ "requires_safety_checker": false,
9
  "safety_checker": [
10
+ null,
11
+ null
12
  ],
13
  "scheduler": [
14
  "diffusers",
scheduler/scheduler_config.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "_class_name": "PNDMScheduler",
3
- "_diffusers_version": "0.13.0.dev0",
4
  "beta_end": 0.012,
5
  "beta_schedule": "scaled_linear",
6
  "beta_start": 0.00085,
 
1
  {
2
  "_class_name": "PNDMScheduler",
3
+ "_diffusers_version": "0.12.1",
4
  "beta_end": 0.012,
5
  "beta_schedule": "scaled_linear",
6
  "beta_start": 0.00085,
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "openai/clip-vit-large-patch14",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
@@ -19,7 +19,7 @@
19
  "num_hidden_layers": 12,
20
  "pad_token_id": 1,
21
  "projection_dim": 768,
22
- "torch_dtype": "float32",
23
- "transformers_version": "4.26.0",
24
  "vocab_size": 49408
25
  }
 
1
  {
2
+ "_name_or_path": "converted/Anything.ckpt/text_encoder",
3
  "architectures": [
4
  "CLIPTextModel"
5
  ],
 
19
  "num_hidden_layers": 12,
20
  "pad_token_id": 1,
21
  "projection_dim": 768,
22
+ "torch_dtype": "float16",
23
+ "transformers_version": "4.24.0",
24
  "vocab_size": 49408
25
  }
text_encoder/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:78caa4e74ebb38cd61f4193c06d5b355adfa13b0addc59b65249947e5aeac6ec
3
- size 492307041
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:488405b354aac33a1eae9d1e471b7aa7a390a2d0aaf8dd42bcff3daf21c49c13
3
+ size 246188833
tokenizer/tokenizer_config.json CHANGED
@@ -19,7 +19,7 @@
19
  },
20
  "errors": "replace",
21
  "model_max_length": 77,
22
- "name_or_path": "openai/clip-vit-large-patch14",
23
  "pad_token": "<|endoftext|>",
24
  "special_tokens_map_file": "./special_tokens_map.json",
25
  "tokenizer_class": "CLIPTokenizer",
 
19
  },
20
  "errors": "replace",
21
  "model_max_length": 77,
22
+ "name_or_path": "converted/Anything.ckpt/tokenizer",
23
  "pad_token": "<|endoftext|>",
24
  "special_tokens_map_file": "./special_tokens_map.json",
25
  "tokenizer_class": "CLIPTokenizer",
unet/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
- "_diffusers_version": "0.13.0.dev0",
 
4
  "act_fn": "silu",
5
  "attention_head_dim": 8,
6
  "block_out_channels": [
 
1
  {
2
  "_class_name": "UNet2DConditionModel",
3
+ "_diffusers_version": "0.12.1",
4
+ "_name_or_path": "converted/Anything.ckpt/unet",
5
  "act_fn": "silu",
6
  "attention_head_dim": 8,
7
  "block_out_channels": [
unet/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:22976fcfe7d1f006644f385a31dc30f4fc5d984d0a4e7294342748decd7b316e
3
- size 3438366373
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4daf793403f441971cacd89ed761c81ae8eca7b015db71dc90f011cbf19421e8
3
+ size 1719334053
vae/config.json CHANGED
@@ -1,6 +1,7 @@
1
  {
2
  "_class_name": "AutoencoderKL",
3
- "_diffusers_version": "0.13.0.dev0",
 
4
  "act_fn": "silu",
5
  "block_out_channels": [
6
  128,
@@ -20,7 +21,6 @@
20
  "norm_num_groups": 32,
21
  "out_channels": 3,
22
  "sample_size": 512,
23
- "scaling_factor": 0.18215,
24
  "up_block_types": [
25
  "UpDecoderBlock2D",
26
  "UpDecoderBlock2D",
 
1
  {
2
  "_class_name": "AutoencoderKL",
3
+ "_diffusers_version": "0.12.1",
4
+ "_name_or_path": "converted/Anything.ckpt/vae",
5
  "act_fn": "silu",
6
  "block_out_channels": [
7
  128,
 
21
  "norm_num_groups": 32,
22
  "out_channels": 3,
23
  "sample_size": 512,
 
24
  "up_block_types": [
25
  "UpDecoderBlock2D",
26
  "UpDecoderBlock2D",
vae/diffusion_pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:876a906810a8b2470c3092f307742f6ad8b9dbf759fb7c0ff020d0c610c996da
3
- size 334711857
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:08a284aabc10168a69717d8be8e3a8344edd4a67d9fdaec78f64a2530aac1285
3
+ size 167407601