GiantAnalytics committed
Commit d2b9f22
1 Parent(s): bd39484

First commit small size

.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ image_0.png filter=lfs diff=lfs merge=lfs -text
+ image_1.png filter=lfs diff=lfs merge=lfs -text
+ image_2.png filter=lfs diff=lfs merge=lfs -text
+ image_3.png filter=lfs diff=lfs merge=lfs -text
RCD-Final Logosmall size.jpg ADDED
README.md ADDED
@@ -0,0 +1,45 @@
+ ---
+ base_model: stabilityai/stable-diffusion-xl-base-1.0
+ library_name: diffusers
+ license: creativeml-openrail-m
+ tags:
+ - stable-diffusion-xl
+ - stable-diffusion-xl-diffusers
+ - text-to-image
+ - diffusers-training
+ - diffusers
+ inference: true
+ ---
+
+ <!-- This model card has been generated automatically according to the information the training script had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Text-to-image finetuning - GiantAnalytics/sdxl_fine_tuned_model_aditya_2
+
+ This pipeline was finetuned from **stabilityai/stable-diffusion-xl-base-1.0** on the **/content/drive/MyDrive/combine_images/** dataset. Below are some example images generated with the finetuned pipeline using the following prompt: an abstract pattern composed of organic, brushstroke-like shapes in various shades of blue, brown, and white. The shapes are arranged in a loose, overlapping pattern, creating a sense of movement and energy:
+
+ ![img_0](./image_0.png)
+ ![img_1](./image_1.png)
+ ![img_2](./image_2.png)
+ ![img_3](./image_3.png)
+
+ Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
+
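+ The card does not say whether this VAE was baked into the exported pipeline, so a common pattern is to load it explicitly and pass it in at inference time. The snippet below is a minimal sketch of that pattern, not code from the training script; the fp16 dtype is an assumption, and the repository id is taken from the heading above.
+
+ ```python
+ # Sketch (assumed usage): attach the fp16-fix VAE when loading the fine-tuned pipeline.
+ import torch
+ from diffusers import AutoencoderKL, StableDiffusionXLPipeline
+
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+     "GiantAnalytics/sdxl_fine_tuned_model_aditya_2",
+     vae=vae,
+     torch_dtype=torch.float16,
+ )
+ ```
+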
+ ## Intended uses & limitations
+
+ #### How to use
+
+ ```python
+ # TODO: add an example code snippet for running this diffusion pipeline
+ ```
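+
+ Until the official snippet is added, the following is a minimal sketch of standard `diffusers` usage, not code taken from this repository's training script; the fp16 dtype, step count, and output filename are assumptions, and the prompt is the one quoted above.
+
+ ```python
+ # Minimal sketch (assumed usage): load the fine-tuned pipeline and generate one image.
+ import torch
+ from diffusers import StableDiffusionXLPipeline
+
+ pipe = StableDiffusionXLPipeline.from_pretrained(
+     "GiantAnalytics/sdxl_fine_tuned_model_aditya_2",
+     torch_dtype=torch.float16,  # optionally pass vae=... as in the VAE sketch above
+ )
+ pipe.to("cuda")
+
+ prompt = (
+     "an abstract pattern composed of organic, brushstroke-like shapes in various "
+     "shades of blue, brown, and white, arranged in a loose, overlapping pattern"
+ )
+ image = pipe(prompt=prompt, num_inference_steps=30).images[0]
+ image.save("example.png")
+ ```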
+
+ #### Limitations and bias
+
+ [TODO: provide examples of latent issues and potential remediations]
+
+ ## Training details
+
+ [TODO: describe the data used to train the model]
image_0.png ADDED

Git LFS Details

  • SHA256: 1ffcb3a155394a97e1fc5e3c60f40296b67ff9c49b4c84559aab851ada57b37e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.4 MB
image_1.png ADDED

Git LFS Details

  • SHA256: f8dd58e8defe9c9655ea80a1576d6e2f8c5835e6abc83a1bcbc68cdd5c9f2e11
  • Pointer size: 132 Bytes
  • Size of remote file: 1.66 MB
image_2.png ADDED

Git LFS Details

  • SHA256: 28eefbcd8dc642e8003fb8e627aed47ac61f37f021d64127029aae19da9bbe56
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
image_3.png ADDED

Git LFS Details

  • SHA256: 853f64a1e3c90e5381e323bbf1837bd252d7e1747f7e33114033a682c10c448a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.56 MB
model_index.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_class_name": "StableDiffusionXLPipeline",
+   "_diffusers_version": "0.30.0.dev0",
+   "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+   "feature_extractor": [
+     null,
+     null
+   ],
+   "force_zeros_for_empty_prompt": true,
+   "image_encoder": [
+     null,
+     null
+   ],
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "text_encoder_2": [
+     "transformers",
+     "CLIPTextModelWithProjection"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
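
Each entry in model_index.json maps a pipeline sub-folder to the library and class that diffusers should instantiate for it; DiffusionPipeline.from_pretrained reads this file to resolve the pipeline class and its components. Below is a minimal sketch of inspecting the resolved components; the repository id is taken from the README heading, and nothing else is assumed.

```python
# Sketch (assumed usage): check which classes diffusers resolves from model_index.json.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("GiantAnalytics/sdxl_fine_tuned_model_aditya_2")
print(type(pipe).__name__)            # StableDiffusionXLPipeline  (from "_class_name")
print(type(pipe.scheduler).__name__)  # EulerDiscreteScheduler
print(type(pipe.unet).__name__)       # UNet2DConditionModel
print(type(pipe.vae).__name__)        # AutoencoderKL
```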