benjamin-paine commited on
Commit
c817021
1 Parent(s): b00eff9

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +78 -0
README.md CHANGED
@@ -1,3 +1,81 @@
1
  ---
2
  license: openrail++
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: openrail++
3
  ---
4
+ This repository contains offset versions of https://huggingface.co/mhdang/dpo-sdxl-text2image-v1 and https://huggingface.co/mhdang/dpo-sd1.5-text2image-v1.
5
+
6
+ These can be added directly to any initialized UNet to inject DPO training into it. See the code below for usage (diffusers only).
7
+
8
+ ```py
9
def inject_dpo(unet: UNet2DConditionModel, dpo_path: str, strict: bool = False) -> None:
    """
    Injects DPO offset weights directly into an initialized UNet, in place.

    Args:
        unet (`UNet2DConditionModel`)
            The initialized UNet from your pipeline.
        dpo_path (`str`)
            The path to the `.safetensors` file downloaded from https://huggingface.co/benjamin-paine/sd-dpo-offsets/.
            Make sure you're using the right file for the right base model.
        strict (`bool`, *optional*)
            Whether or not to raise errors when a weight cannot be applied. Defaults to False.

    Raises:
        IOError: When `strict` is True and a key in the offset file cannot be
            matched to a parameter on the UNet.
    """
    from safetensors import safe_open
    # FIX: the original opened `dpo_offset_path`, an undefined name — the
    # parameter is `dpo_path`.
    with safe_open(dpo_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            # Keys are dotted attribute paths (e.g. "down_blocks.0....weight");
            # walk the path on the UNet one attribute at a time.
            key_parts = key.split(".")
            current_layer = unet
            for key_part in key_parts[:-1]:
                current_layer = getattr(current_layer, key_part, None)
                if current_layer is None:
                    break
            if current_layer is None:
                if strict:
                    raise IOError(f"Couldn't find a layer to inject key {key} in.")
                continue
            layer_param = getattr(current_layer, key_parts[-1], None)
            if layer_param is None:
                if strict:
                    raise IOError(f"Couldn't get weight parameter for key {key}")
                # FIX: without this `continue`, non-strict mode fell through and
                # raised AttributeError on `None.data` below.
                continue
            # Add the DPO offset onto the existing weights in place.
            layer_param.data += f.get_tensor(key)
40
+ ```
41
+ Now you can use this function like so:
42
+
43
+ ```py
44
import huggingface_hub
import torch
from diffusers import StableDiffusionPipeline

# Build the SD 1.5 pipeline with fp16 weights.
pipeline = StableDiffusionPipeline.from_pretrained(
    "Lykon/dreamshaper-8", torch_dtype=torch.float16
)

# Fetch the SD 1.5 DPO offset file, then add it into the UNet in place.
offset_file = huggingface_hub.hf_hub_download(
    "painebenjamin/sd-dpo-offsets", "sd_v15_unet_dpo_offset.safetensors"
)
inject_dpo(pipeline.unet, offset_file)

# Generate an image and save it.
prompt = "Two cats playing chess on a tree branch"
pipeline(prompt, guidance_scale=7.5).images[0].save("cats_playing_chess.png")
61
+ ```
62
+
63
+ Or for XL:
64
+
65
+ ```py
66
from diffusers import StableDiffusionXLPipeline
# FIX: the snippet used `huggingface_hub` and `torch` without importing them,
# so it failed when copied on its own. `inject_dpo` must also be defined
# (see the function earlier in this README).
import huggingface_hub
import torch

# load sdxl pipeline
model_id = "Lykon/dreamshaper-xl-1-0"
pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16")

# download the SDXL DPO offsets
# NOTE(review): the docstring above references benjamin-paine/sd-dpo-offsets
# while this downloads painebenjamin/sd-dpo-offsets — confirm which repo id is canonical.
dpo_path = huggingface_hub.hf_hub_download("painebenjamin/sd-dpo-offsets", "sd_xl_unet_dpo_offset.safetensors")
# inject the offsets into the pipeline's UNet in place
inject_dpo(pipe.unet, dpo_path)

# make image
prompt = "Two cats playing chess on a tree branch"
image = pipe(prompt, guidance_scale=7.5).images[0]
image.save("cats_playing_chess.png")
81
+ ```