README.md CHANGED
@@ -2,29 +2,20 @@
  license: other
  language:
  - en
- library_name: diffusers
  pipeline_tag: text-to-image
  tags:
  - art
  ---
- some random merges and or ggml conversions


- booru tags

- ## fname explanation
- - `yom.safetensors` | ehhh
- - `yompastel45.safetensors` | extra color
- - `yompastel65.safetensors` | more color
- - `yomga70.safetensors` | pretty good
- - `awooooooo.safetensors` | probably the best one
-
- licensed under yodayno v2:
  ```
  This license allows you to use the model, but only for non-commercial purposes. You cannot use the model or any part of it in a paid service or sell it.
  If you use the model on any platform, you must provide a link or reference to the original model. You must give credit to the licensor whenever you use the model.
  The licensor does not provide any warranty and is not liable for any damages caused by the use of the model.
  If you break any of the terms, this license will be terminated.
  This license is governed by the laws of the jurisdiction in which the licensor is located.
- ```
- take that yodayo
 
  license: other
  language:
  - en

  pipeline_tag: text-to-image
  tags:
  - art
  ---
+ some merges and/or ggml conversions


+ img: booru tags; use the `/awoo/` models preferably, as they're the best (a loading sketch follows the license block)

+ all non-ggml models are licensed under yodayno v2:
  ```
  This license allows you to use the model, but only for non-commercial purposes. You cannot use the model or any part of it in a paid service or sell it.
  If you use the model on any platform, you must provide a link or reference to the original model. You must give credit to the licensor whenever you use the model.
  The licensor does not provide any warranty and is not liable for any damages caused by the use of the model.
  If you break any of the terms, this license will be terminated.
  This license is governed by the laws of the jurisdiction in which the licensor is located.
+ ```
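As a rough illustration of the booru-tag prompting mentioned above, here is a minimal sketch of loading one of the single-file checkpoints with diffusers and prompting it with booru-style tags. The checkpoint path, tag string, and sampler settings are assumptions for illustration, not anything shipped with the repo.

```python
# minimal sketch, assuming a recent diffusers with single-file loading support;
# the checkpoint path, tags, and settings are illustrative only
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_single_file(
    "awoo/awooooooo.safetensors",  # any of the .safetensors checkpoints in this repo
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="1girl, solo, wolf ears, night sky, outdoors, masterpiece",  # booru-style tags
    negative_prompt="lowres, bad anatomy, worst quality",
    num_inference_steps=28,
    guidance_scale=7.0,
).images[0]
image.save("sample.png")
```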
 
anithing.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:54c057fcf75b03e6847a2b29c3240097d1675690a205441ead3ec80487311a40
- size 4265096720

awoo/README.md ADDED
@@ -0,0 +1,8 @@
+ # awoo! models
+
+ these models are actually good; some style merges will be found in `/awoostyles/` whenever i feel like merging them (a rough merge sketch follows this block)
+
+ - awoooooo.sfts
+ - - base model for the other awoo models
+ - awooooooo.sfts
+ - - more color, more better, no baked lora/ti
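Since the `/awoostyles/` merges are not published yet, below is only a minimal sketch of how a weighted checkpoint merge can be done with `safetensors`; the `style.safetensors` path and the 0.3 blend ratio are made up for illustration and are not the actual recipe.

```python
# minimal merge sketch, not the actual /awoostyles/ recipe;
# style.safetensors and the 0.3 ratio are placeholders
from safetensors.torch import load_file, save_file

base = load_file("awoo/awoooooo.safetensors")  # awoo base model
style = load_file("style.safetensors")         # hypothetical style checkpoint
alpha = 0.3                                    # fraction of the style model to blend in

merged = {}
for key, tensor in base.items():
    if key in style and style[key].shape == tensor.shape:
        # simple weighted average of matching weights
        merged[key] = (1 - alpha) * tensor + alpha * style[key]
    else:
        # keep base weights where the two checkpoints differ
        merged[key] = tensor

save_file(merged, "awoo_style_merge.safetensors")
```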
awoooooo.safetensors → awoo/awoooooo.safetensors RENAMED
File without changes
awooooooo.safetensors → awoo/awooooooo.safetensors RENAMED
File without changes
classic/README.md ADDED
@@ -0,0 +1,12 @@
+ # classic models
+
+ these are some old models; i don't recommend using them
+
+ - yomga.sfts
+ - - one of the few good ones
+ - - more colorful
+ - yom.sfts
+ - - original one
+ - - more dried out
+
+ the rest are not that good, apart from anithing probably
anithing-v2.safetensors → classic/anithing2.safetensors RENAMED
File without changes
ymm35x.safetensors → classic/ymm35x.safetensors RENAMED
File without changes
yom.safetensors → classic/yom.safetensors RENAMED
File without changes
yomga70.safetensors → classic/yomga.safetensors RENAMED
File without changes
yompastel45.safetensors → classic/yompastel45.safetensors RENAMED
File without changes
yompastel65.safetensors → classic/yompastel65.safetensors RENAMED
File without changes
yoms70.safetensors → classic/yoms70.safetensors RENAMED
File without changes
inpaint/README.md ADDED
@@ -0,0 +1,5 @@
+ # inpaint models
+
+ they're used to inpaint (a usage sketch follows this block)
+
+ yup
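To make "used to inpaint" concrete, here is a minimal sketch with diffusers, assuming one of the renamed inpainting checkpoints plus placeholder input, mask, and prompt; none of these settings come from the repo.

```python
# minimal inpainting sketch; input.png, mask.png, and the prompt are placeholders
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_single_file(
    "inpaint/yomga.inpainting.safetensors",
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("input.png").convert("RGB")  # image to edit
mask_image = Image.open("mask.png").convert("L")     # white = region to repaint

result = pipe(
    prompt="1girl, smiling, detailed face",  # booru-style tags work here too
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=30,
).images[0]
result.save("inpainted.png")
```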
anithing-inpainting.inpainting.safetensors → inpaint/anithing.inpainting.safetensors RENAMED
File without changes
yomga-inpainting.inpainting.safetensors → inpaint/yomga.inpainting.safetensors RENAMED
File without changes
model_index.json DELETED
@@ -1,33 +0,0 @@
- {
-   "_class_name": "StableDiffusionPipeline",
-   "_diffusers_version": "0.11.1",
-   "feature_extractor": [
-     "transformers",
-     "CLIPImageProcessor"
-   ],
-   "requires_safety_checker": true,
-   "safety_checker": [
-     "stable_diffusion",
-     "StableDiffusionSafetyChecker"
-   ],
-   "scheduler": [
-     "diffusers",
-     "PNDMScheduler"
-   ],
-   "text_encoder": [
-     "transformers",
-     "CLIPTextModel"
-   ],
-   "tokenizer": [
-     "transformers",
-     "CLIPTokenizer"
-   ],
-   "unet": [
-     "diffusers",
-     "UNet2DConditionModel"
-   ],
-   "vae": [
-     "diffusers",
-     "AutoencoderKL"
-   ]
- }

yom.yaml DELETED
@@ -1,70 +0,0 @@
- model:
-   base_learning_rate: 1.0e-04
-   target: ldm.models.diffusion.ddpm.LatentDiffusion
-   params:
-     linear_start: 0.00085
-     linear_end: 0.0120
-     num_timesteps_cond: 1
-     log_every_t: 200
-     timesteps: 1000
-     first_stage_key: "image"
-     cond_stage_key: "caption"
-     image_size: 64
-     channels: 4
-     cond_stage_trainable: false # Note: different from the one we trained before
-     conditioning_key: crossattn
-     monitor: val/loss_simple_ema
-     scale_factor: 0.18215
-     use_ema: False
-
-     scheduler_config: # 10000 warmup steps
-       target: ldm.lr_scheduler.LambdaLinearScheduler
-       params:
-         warm_up_steps: [ 10000 ]
-         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
-         f_start: [ 1.e-6 ]
-         f_max: [ 1. ]
-         f_min: [ 1. ]
-
-     unet_config:
-       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-       params:
-         image_size: 32 # unused
-         in_channels: 4
-         out_channels: 4
-         model_channels: 320
-         attention_resolutions: [ 4, 2, 1 ]
-         num_res_blocks: 2
-         channel_mult: [ 1, 2, 4, 4 ]
-         num_heads: 8
-         use_spatial_transformer: True
-         transformer_depth: 1
-         context_dim: 768
-         use_checkpoint: True
-         legacy: False
-
-     first_stage_config:
-       target: ldm.models.autoencoder.AutoencoderKL
-       params:
-         embed_dim: 4
-         monitor: val/rec_loss
-         ddconfig:
-           double_z: true
-           z_channels: 4
-           resolution: 256
-           in_channels: 3
-           out_ch: 3
-           ch: 128
-           ch_mult:
-           - 1
-           - 2
-           - 4
-           - 4
-           num_res_blocks: 2
-           attn_resolutions: [ ]
-           dropout: 0.0
-         lossconfig:
-           target: torch.nn.Identity
-
-     cond_stage_config:
-       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder