Your Name committed
Commit 0881b9c • 1 parent: 9f1d2e9
Files changed (48)
  1. {feature_extractor → anime/feature_extractor}/preprocessor_config.json +0 -0
  2. model_index.json → anime/model_index.json +0 -0
  3. {safety_checker → anime/safety_checker}/config.json +0 -0
  4. {safety_checker → anime/safety_checker}/pytorch_model.bin +0 -0
  5. {scheduler → anime/scheduler}/scheduler_config.json +0 -0
  6. {text_encoder → anime/text_encoder}/config.json +0 -0
  7. {text_encoder → anime/text_encoder}/pytorch_model.bin +1 -1
  8. {tokenizer → anime/tokenizer}/merges.txt +0 -0
  9. {tokenizer → anime/tokenizer}/special_tokens_map.json +0 -0
  10. {tokenizer → anime/tokenizer}/tokenizer_config.json +0 -0
  11. {tokenizer → anime/tokenizer}/vocab.json +0 -0
  12. {unet → anime/unet}/config.json +0 -0
  13. {unet → anime/unet}/diffusion_pytorch_model.bin +1 -1
  14. {vae → anime/vae}/config.json +0 -0
  15. {vae → anime/vae}/diffusion_pytorch_model.bin +1 -1
  16. embeddings/kkw-NativeAmerican.pt +3 -0
  17. handler.py +237 -89
  18. lora/ASSv12-000011.safetensors +3 -0
  19. lora/BarbieCore.safetensors +3 -0
  20. lora/CyberPunkAI.safetensors +3 -0
  21. lora/EkuneSideDoggy.safetensors +3 -0
  22. lora/FutaCockCloseUp-v1.safetensors +3 -0
  23. lora/PovBlowjob-v3.safetensors +3 -0
  24. lora/dp_from_behind_v0.1b.safetensors +3 -0
  25. lora/ftm-v0.safetensors +3 -0
  26. lora/nurse_v11-05.safetensors +3 -0
  27. lora/qqq-grabbing_from_behind-v2-000006.safetensors +3 -0
  28. lora/rindou_v4.2.safetensors +3 -0
  29. lora/screaming.safetensors +3 -0
  30. lora/shibari_v20.safetensors +3 -0
  31. lora/tajnaclub_high_heelsv1.2.safetensors +3 -0
  32. lora/tgirls_V3_5.safetensors +3 -0
  33. lycoris/fapp9.safetensors +3 -0
  34. realistic/feature_extractor/preprocessor_config.json +28 -0
  35. realistic/model_index.json +33 -0
  36. realistic/safety_checker/config.json +168 -0
  37. realistic/safety_checker/pytorch_model.bin +3 -0
  38. realistic/scheduler/scheduler_config.json +15 -0
  39. realistic/text_encoder/config.json +24 -0
  40. realistic/text_encoder/pytorch_model.bin +3 -0
  41. realistic/tokenizer/merges.txt +0 -0
  42. realistic/tokenizer/special_tokens_map.json +24 -0
  43. realistic/tokenizer/tokenizer_config.json +33 -0
  44. realistic/tokenizer/vocab.json +0 -0
  45. realistic/unet/config.json +65 -0
  46. realistic/unet/diffusion_pytorch_model.bin +3 -0
  47. realistic/vae/config.json +31 -0
  48. realistic/vae/diffusion_pytorch_model.bin +3 -0
{feature_extractor → anime/feature_extractor}/preprocessor_config.json RENAMED
File without changes
model_index.json → anime/model_index.json RENAMED
File without changes
{safety_checker → anime/safety_checker}/config.json RENAMED
File without changes
{safety_checker → anime/safety_checker}/pytorch_model.bin RENAMED
File without changes
{scheduler → anime/scheduler}/scheduler_config.json RENAMED
File without changes
{text_encoder → anime/text_encoder}/config.json RENAMED
File without changes
{text_encoder → anime/text_encoder}/pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38a67003cd791d4fc008ae1fd24615b8b168f83cc8e853b746a7ec7bb3d64f42
+oid sha256:ec3883f2cc6b4a8661212175bb7ccefe9f2309888d3e53c9f274e7de60acc88a
 size 492306077
{tokenizer → anime/tokenizer}/merges.txt RENAMED
File without changes
{tokenizer → anime/tokenizer}/special_tokens_map.json RENAMED
File without changes
{tokenizer → anime/tokenizer}/tokenizer_config.json RENAMED
File without changes
{tokenizer → anime/tokenizer}/vocab.json RENAMED
File without changes
{unet → anime/unet}/config.json RENAMED
File without changes
{unet → anime/unet}/diffusion_pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b64c917a491e553494fa0eb452824cc068296273a09436ca0803b06c42046c7d
+oid sha256:d8e4501ae1ba2c9950a2c06cbdd74a3317a4b4efbce54557163d114945760a0a
 size 3438366373
{vae → anime/vae}/config.json RENAMED
File without changes
{vae → anime/vae}/diffusion_pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a425a89f2e522790b3975b93ed380814e68ec77a04841dced0832cad70eab929
+oid sha256:40bec3b829f93e173729fd6dca141f4488803311de360d60de1c7d3714953c0f
 size 334712113
embeddings/kkw-NativeAmerican.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed4b15ad30845744274acde1341783cbfc115f520a41fe3d8457537dfef15a75
+size 108492
handler.py CHANGED
@@ -12,7 +12,7 @@ from typing import Union
 from concurrent.futures import ThreadPoolExecutor
 import numpy as np
 from PIL import ImageFilter
-
+from transformers import CLIPImageProcessor, CLIPTokenizer, CLIPModel
 
 import torch
 from diffusers import (
@@ -20,6 +20,7 @@ from diffusers import (
     DPMSolverMultistepScheduler,
     DPMSolverSinglestepScheduler,
     EulerAncestralDiscreteScheduler,
+    StableDiffusionPipeline,
     utils,
 )
 from safetensors.torch import load_file
@@ -41,20 +42,66 @@ if device.type != "cuda":
 
 class EndpointHandler:
     LORA_PATHS = {
-        "hairdetailer": str(REPO_DIR / "lora/hairdetailer.safetensors"),
-        "lora_leica": str(REPO_DIR / "lora/lora_leica.safetensors"),
-        "epiNoiseoffset_v2": str(REPO_DIR / "lora/epiNoiseoffset_v2.safetensors"),
-        "MBHU-TT2FRS": str(REPO_DIR / "lora/MBHU-TT2FRS.safetensors"),
-        "ShinyOiledSkin_v20": str(
-            REPO_DIR / "lora/ShinyOiledSkin_v20-LoRA.safetensors"
-        ),
-        "polyhedron_new_skin_v1.1": str(
-            REPO_DIR / "lora/polyhedron_new_skin_v1.1.safetensors"
-        ),
-        "detailed_eye-10": str(REPO_DIR / "lora/detailed_eye-10.safetensors"),
-        "add_detail": str(REPO_DIR / "lora/add_detail.safetensors"),
-        "MuscleGirl_v1": str(REPO_DIR / "lora/MuscleGirl_v1.safetensors"),
-        "flat2": str(REPO_DIR / "lora/flat2.safetensors"),
+        "hairdetailer": [str(REPO_DIR / "lora/hairdetailer.safetensors"), ""],
+        "lora_leica": [str(REPO_DIR / "lora/lora_leica.safetensors"), "leica_style"],
+        "epiNoiseoffset_v2": [str(REPO_DIR / "lora/epiNoiseoffset_v2.safetensors"), ""],
+        "MBHU-TT2FRS": [
+            str(REPO_DIR / "lora/MBHU-TT2FRS.safetensors"),
+            "flat breast, small breast, big breast, fake breast",
+        ],
+        "polyhedron_new_skin_v1.1": [
+            str(REPO_DIR / "lora/polyhedron_new_skin_v1.1.safetensors"),
+            "skin blemish, detailed skin ",
+        ],
+        "ShinyOiledSkin_v20": [
+            str(REPO_DIR / "lora/ShinyOiledSkin_v20-LoRA.safetensors"),
+            "shiny skin",
+        ],
+        "detailed_eye-10": [str(REPO_DIR / "lora/detailed_eye-10.safetensors"), ""],
+        "add_detail": [str(REPO_DIR / "lora/add_detail.safetensors"), ""],
+        "MuscleGirl_v1": [str(REPO_DIR / "lora/MuscleGirl_v1.safetensors"), "abs"],
+        "nurse_v11-05": [str(REPO_DIR / "lora/nurse_v11-05.safetensors"), "nurse"],
+        "shibari_v20": [str(REPO_DIR / "lora/shibari_v20.safetensors"), "shibari,rope"],
+        "tajnaclub_high_heelsv1.2": [
+            str(REPO_DIR / "lora/tajnaclub_high_heelsv1.2.safetensors"),
+            "high heels",
+        ],
+        "CyberPunkAI": [
+            str(REPO_DIR / "lora/CyberPunkAI.safetensors"),
+            "neon CyberpunkAI",
+        ],
+        "FutaCockCloseUp-v1": [
+            str(REPO_DIR / "lora/FutaCockCloseUp-v1.safetensors"),
+            "huge penis",
+        ],
+        "PovBlowjob-v3": [
+            str(REPO_DIR / "lora/PovBlowjob-v3.safetensors"),
+            "blowjob, deepthroat, kneeling, runny makeup, creampie",
+        ],
+        "dp_from_behind_v0.1b": [
+            str(REPO_DIR / "lora/dp_from_behind_v0.1b.safetensors"),
+            "1girl, 2boys, double penetration, multiple penises",
+        ],
+        "EkuneSideDoggy": [
+            str(REPO_DIR / "lora/EkuneSideDoggy.safetensors"),
+            "sidedoggystyle, doggystyle",
+        ],
+        "qqq-grabbing_from_behind-v2-000006": [
+            str(REPO_DIR / "lora/qqq-grabbing_from_behind-v2-000006.safetensors"),
+            "grabbing from behind, breast grab",
+        ],
+        "ftm-v0": [
+            str(REPO_DIR / "lora/ftm-v0.safetensors"),
+            "big mouth, tongue, long tongue",
+        ],
+        "tgirls_V3_5": [
+            str(REPO_DIR / "lora/tgirls_V3_5.safetensors"),
+            "large penis, penis, erect penis",
+        ],
+        "fapp9": [
+            str(REPO_DIR / "lora/fapp9.safetensors"),
+            "large penis, penis, erect penis",
+        ],
     }
 
     TEXTUAL_INVERSION = [
@@ -63,9 +110,13 @@ class EndpointHandler:
             "token": "easynegative",
         },
         {
-            "weight_name": str(REPO_DIR / "embeddings/badhandv4.pt"),
+            "weight_name": str(REPO_DIR / "embeddings/kkw-NativeAmerican.pt"),
             "token": "badhandv4",
         },
+        {
+            "weight_name": str(REPO_DIR / "embeddings/badhandv4.pt"),
+            "token": "kkw-Afro, kkw-Asian, kkw-Euro ",
+        },
         {
             "weight_name": str(REPO_DIR / "embeddings/bad-artist-anime.pt"),
             "token": "bad-artist-anime",
@@ -95,56 +146,119 @@ class EndpointHandler:
             max_workers=1
         )  # Vous pouvez ajuster max_workers en fonction de vos besoins
 
+        realistic_path = str(REPO_DIR / "realistic/")
+        self.pipe_realistic, self.safety_checker = self.load_realistic(realistic_path)
+
+        anime_path = str(REPO_DIR / "anime/")
+        self.pipe_anime, self.pipe_anime_safety_checker = self.load_anime(anime_path)
+
+        # Load CLipImagePRocessor for NSFW check later
+        self.image_processor = CLIPImageProcessor.from_pretrained(
+            "openai/clip-vit-base-patch16"
+        )
+
+    def load_model_essentials(self, model_path):
+        """common to all models"""
+
         # load the optimized model
-        self.pipe = DiffusionPipeline.from_pretrained(
-            path,
+
+        if "realistic" in model_path:
+            pipe = DiffusionPipeline.from_pretrained(
+                pretrained_model_name_or_path=model_path,
+                custom_pipeline="lpw_stable_diffusion",  # avoid 77 token limit
+                torch_dtype=torch.float16,  # accelerate render
+            )
+
+            safety_checker = pipe.safety_checker.to(device).to(torch.float16)
+        else:
+            safety_checker = None
+
+        pipe = DiffusionPipeline.from_pretrained(
+            pretrained_model_name_or_path=model_path,
             custom_pipeline="lpw_stable_diffusion",  # avoid 77 token limit
             torch_dtype=torch.float16,  # accelerate render
+            safety_checker=None,  # Mode boulardus
         )
-        self.pipe = self.pipe.to(device)
+
+        pipe = pipe.to(device)
+
+        # Disable progress bar
+        pipe.set_progress_bar_config(disable=True)
+
+        # Load negative embeddings to avoid bad hands, etc
+        self.load_embeddings(pipe)
+
+        # boosts performance by another 20%
+        pipe.enable_xformers_memory_efficient_attention()
+        pipe.enable_attention_slicing()  # may need a requirement in the root with xformer
+
+        return pipe, safety_checker
+
+    def load_anime(self, path):
+        """Load anime model"""
+
+        # Init pipe
+        pipe, safety_checker = self.load_model_essentials(path)
 
         # https://stablediffusionapi.com/docs/a1111schedulers/
 
-        # DPM++ 2M SDE Karras
-        # increase step to avoid high contrast num_inference_steps=30
-        # self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
-        #     self.pipe.scheduler.config,
-        #     use_karras_sigmas=True,
-        #     algorithm_type="sde-dpmsolver++",
-        # )
+        # Euler a
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
+            pipe.scheduler.config,
+        )
 
-        # DPM++ 2M Karras
-        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
-            self.pipe.scheduler.config,
-            use_karras_sigmas=True,
-        )
-
-        # Mode boulardus
-        self.pipe.safety_checker = None
-
-        # Disable progress bar
-        self.pipe.set_progress_bar_config(disable=True)
-
-        # Load negative embeddings to avoid bad hands, etc
-        self.load_embeddings()
-
-        # boosts performance by another 20%
-        self.pipe.enable_xformers_memory_efficient_attention()
-        self.pipe.enable_attention_slicing()
-        # may need a requirement in the root with xformer
-
         # Load loras one time only
         # Must be replaced once we will know how to hot load/unload
         # it use the own made load_lora function
         self.load_selected_loras(
+            pipe,
             [
-                ["polyhedron_new_skin_v1.1", 0.2],
-                ["detailed_eye-10", 0.2],
-                ["add_detail", 0.3],
-                ["MuscleGirl_v1", 0.2],
-            ]
+                ["detailed_eye-10", 0.3],
+                ["add_detail", 0.3],
+                ["MuscleGirl_v1", 0.3],
+                ["dp_from_behind_v0.1b", 0.2],
+                ["shibari_v20", 0.03],
+                ["ftm-v0", 0.03],
+                ["PovBlowjob-v3", 0.03],
+            ],
         )
 
+        return pipe, safety_checker
+
+    def load_realistic(self, path):
+        """Load realistic model"""
+
+        # Init pipe
+        pipe, safety_checker = self.load_model_essentials(path)
+
+        # https://stablediffusionapi.com/docs/a1111schedulers/
+
+        # DPM++ 2M Karras
+        pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+            pipe.scheduler.config,
+            use_karras_sigmas=True,
+        )
+
+        # Load loras one time only
+        # Must be replaced once we will know how to hot load/unload
+        # it use the own made load_lora function
+        self.load_selected_loras(
+            pipe,
+            [
+                ["polyhedron_new_skin_v1.1", 0.25],
+                ["detailed_eye-10", 0.2],
+                ["add_detail", 0.2],
+                ["MuscleGirl_v1", 0.2],
+                ["tgirls_V3_5", 0.02],
+                ["PovBlowjob-v3", 0.02],
+                # ["dp_from_behind_v0.1b", 0.2],
+                ["shibari_v20", 0.02],
+                ["ftm-v0", 0.02],
+            ],
        )
 
+        return pipe, safety_checker
+
     def load_lora(self, pipeline, lora_path, lora_weight=0.5):
         state_dict = load_file(lora_path)
         LORA_PREFIX_UNET = "lora_unet"
@@ -221,19 +335,19 @@ class EndpointHandler:
 
         return pipeline
 
-    def load_embeddings(self):
+    def load_embeddings(self, pipeline):
         """Load textual inversions, avoid bad prompts"""
         for model in EndpointHandler.TEXTUAL_INVERSION:
-            self.pipe.load_textual_inversion(
+            pipeline.load_textual_inversion(
                 ".", weight_name=model["weight_name"], token=model["token"]
             )
 
-    def load_selected_loras(self, selections):
+    def load_selected_loras(self, pipeline, selections):
         """Load Loras models, can lead to marvelous creations"""
         for model_name, weight in selections:
-            lora_path = EndpointHandler.LORA_PATHS[model_name]
+            lora_path = EndpointHandler.LORA_PATHS[model_name][0]
             # self.pipe.load_lora_weights(lora_path)
-            self.load_lora(self.pipe, lora_path, weight)
+            self.load_lora(pipeline, lora_path, weight)
 
     def clean_negative_prompt(self, negative_prompt):
         """Clean negative prompt to remove already used negative prompt handlers"""
@@ -260,6 +374,30 @@
 
         return negative_prompt
 
+    def check_fields(self, data):
+        """check for fields, if some missing return error"""
+
+        # 1. Verify input arguments
+        required_fields = [
+            "prompt",
+            "negative_prompt",
+            "width",
+            "num_inference_steps",
+            "height",
+            "guidance_scale",
+            "request_id",
+        ]
+
+        missing_fields = [field for field in required_fields if field not in data]
+
+        if missing_fields:
+            return {
+                "flag": "error",
+                "message": f"Missing fields: {', '.join(missing_fields)}",
+            }
+
+        return False
+
     def clean_request_data(self):
         """Clean up the data related to a specific request ID."""
 
@@ -285,11 +423,12 @@
         latents: Any,
         request_id: str,
         status: str,
+        pipeline: Any,
     ):
         try:
             if status == "progress":
                 # Latents to numpy
-                img_data = self.pipe.decode_latents(latents)
+                img_data = pipeline.decode_latents(latents)
                 img_data = (img_data.squeeze() * 255).astype(np.uint8)
                 img = Image.fromarray(img_data, "RGB")
 
@@ -304,6 +443,7 @@
             else:
                 # pil object
                 # print(latents)
+
                 img = latents
 
             buffered = BytesIO()
@@ -335,48 +475,42 @@
 
         if progress >= 100:
             status = "complete"
+
+            # Check if Image is NSFW
+            image_data = base64.b64decode(latest_image)
+            image_io = BytesIO(image_data)
+            is_nsfw = self.check_nsfw(Image.open(image_io))[0]
+            # is_nsfw = "bypass"
         else:
             status = "in-progress"
+            is_nsfw = ""
 
         return {
             "flag": "success",
             "status": status,
             "progress": int(progress),
             "image": latest_image,
+            "is_nsfw": is_nsfw,
         }
 
-    def start_inference(self, data: Dict) -> Dict:
-        """Start a new inference."""
+    def check_nsfw(self, image):
+        """Check if image is NSFW"""
 
-        global device
+        safety_checker_input = self.image_processor(image, return_tensors="pt").to(
+            device
+        )
 
-        # Which Lora do we load ?
-        # selected_models = [
-        #     ("ShinyOiledSkin_v20", 0.3),
-        #     ("MBHU-TT2FRS", 0.5),
-        #     ("hairdetailer", 0.5),
-        #     ("lora_leica", 0.5),
-        #     ("epiNoiseoffset_v2", 0.5),
-        # ]
+        image, has_nsfw_concept = self.safety_checker(
+            images=np.array(image),
+            clip_input=safety_checker_input.pixel_values.to(torch.float16),
+        )
 
-        # 1. Verify input arguments
-        required_fields = [
-            "prompt",
-            "negative_prompt",
-            "width",
-            "num_inference_steps",
-            "height",
-            "guidance_scale",
-            "request_id",
-        ]
+        return has_nsfw_concept
 
-        missing_fields = [field for field in required_fields if field not in data]
+    def start_inference(self, pipeline, data: Dict) -> Dict:
+        """Start a new inference."""
 
-        if missing_fields:
-            return {
-                "flag": "error",
-                "message": f"Missing fields: {', '.join(missing_fields)}",
-            }
+        global device
 
         # Now extract the fields
         prompt = data["prompt"]
@@ -398,18 +532,14 @@
         # Set the generator seed if provided
         generator = torch.Generator(device="cuda").manual_seed(seed) if seed else None
 
-        # Load the provided Lora models
-        # self.pipe.unload_lora_weights() # Unload models to avoid lora staking
-        # if loras_model:
-        #     self.load_selected_loras(loras_model)
-
-        # set scale of loras, for now take only first scale of the loaded lora and apply to all until we find the way to apply specified scale
-        # scale = {"scale": loras_model[0][1]} if loras_model else None
+        # set scale of loras (mix all loras and apply common scale, can't be indivual)
+        # scale = 0.25 # seems ok
+        # scale = 0.2
 
         try:
             # 2. Process
             with autocast(device.type):
-                image = self.pipe.text2img(
+                image = pipeline.text2img(
                     prompt=prompt,
                     guidance_scale=guidance_scale,
                     num_inference_steps=num_inference_steps,
@@ -419,15 +549,20 @@
                     generator=generator,
                     max_embeddings_multiples=5,
                     callback=lambda step, timestep, latents: self.progress_callback(
-                        step, timestep, latents, request_id, "progress"
+                        step, timestep, latents, request_id, "progress", pipeline
                     ),
                     callback_steps=5,
-                    # cross_attention_kwargs={"scale": 0.2},
-                ).images[0]
+                    # cross_attention_kwargs={"scale": 0.02},
+                )
 
                 # print(image)
                 self.progress_callback(
-                    num_inference_steps, 0, image, request_id, "complete"
+                    num_inference_steps,
+                    0,
+                    image.images[0],
+                    request_id,
+                    "complete",
+                    pipeline,
                 )
 
                 self.inference_in_progress = False
@@ -444,6 +579,7 @@
 
         action = data.get("action", None)
         request_id = data.get("request_id")
+        genre = data.get("genre")
 
         # Check if the request_id is valid for all actions
         if not request_id:
@@ -458,6 +594,11 @@
             return self.check_progress(request_id)
 
         elif action == "inference":
+            # Check field before doing anything
+            check_fields = self.check_fields(data)
+            if check_fields:
+                return check_fields
+
             # Check if an inference is already in progress
             if self.inference_in_progress:
                 return {
@@ -472,7 +613,14 @@
             self.inference_images[request_id] = None
             self.active_request_ids.append(request_id)
 
-            self.executor.submit(self.start_inference, data)
+            # Load model according to genre
+            if genre == "anime":
+                pipe = self.pipe_anime
+            else:
+                pipe = self.pipe_realistic
+
+            self.executor.submit(self.start_inference, pipe, data)
+            # self.start_inference(data)
 
             return {
                 "flag": "success",
lora/ASSv12-000011.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70a5e09e70701195b0f8e359558cc1a17d26b6bd0c864010964875d7c5e68862
+size 51912025
lora/BarbieCore.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6d1118c1bc942b248f07492e1ee149d118aae7916c7af9cdad1b58b2ce0c092
+size 151112335
lora/CyberPunkAI.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a05b7123370fc40abdeaf6913cfec64e3d6a0633d52043e5a375d7a0ddbf7e1
+size 151112443
lora/EkuneSideDoggy.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6c00be1d16bf804aaa406cc28385e18fb85aa006dd6b84848ffb06d9f04254a
+size 37875655
lora/FutaCockCloseUp-v1.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:926d74e0d7a7bde7ac8cb9b4487222c30d21197675b3791c690ea057be0ea21d
+size 151111728
lora/PovBlowjob-v3.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ca2bf4d3f7618fea43f1324d67ef220b07b6cc93d824067ad34ecf439cf9d5c
+size 151118749
lora/dp_from_behind_v0.1b.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7daf59a9e7c2fbb0a728fa4cd4dfc019a8009b07eca395e08eec22b9b62d1bfd
+size 9567809
lora/ftm-v0.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21be0e6c7673eefcb01d14fba3dcecc02104ce01e41c47f83c7f2156486e10d2
+size 151444477
lora/nurse_v11-05.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f98e01d6528f32ef47439c8cf696fb1bce7cd86f78136bfbb4b959690d7fc00e
+size 19045712
lora/qqq-grabbing_from_behind-v2-000006.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a7be0c8e5e418699a251c6dfefdd4a5b52de8d2dd83eb7e18cad820f4d15a98
+size 37877190
lora/rindou_v4.2.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27fa90277cc0e007de331d57b7414f0b7a4a42a87e2d166e518521c7fd8796e4
+size 9558043
lora/screaming.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:884e863e81b53539f4dac8d89b3154fb2b0996d7a72511cac580cc7596bd4a94
+size 18992918
lora/shibari_v20.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1764d4d9eeebab51ecf295330f983ebd206b20688feb8fd7c2a83f784ff0406d
+size 37864830
lora/tajnaclub_high_heelsv1.2.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9476e2fa9759e0bdb733825692ea2a60dd51075dfff38d7ed235d81f1ce3537e
+size 18999240
lora/tgirls_V3_5.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ba32950fb1457aaf3876c3c7c2c9af4fd4ea857487d09c9eb9ed4bec9e716ef
+size 38001393
lycoris/fapp9.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70bfdbb77a5c1d390efb1c62cb9409541994667acdfdb942b5e0bc6d1ddda537
+size 130206396
realistic/feature_extractor/preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPFeatureExtractor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
realistic/model_index.json ADDED
@@ -0,0 +1,33 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.20.0",
+  "feature_extractor": [
+    "transformers",
+    "CLIPFeatureExtractor"
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    "stable_diffusion",
+    "StableDiffusionSafetyChecker"
+  ],
+  "scheduler": [
+    "diffusers",
+    "PNDMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
realistic/safety_checker/config.json ADDED
@@ -0,0 +1,168 @@
+{
+  "_commit_hash": "cb41f3a270d63d454d385fc2e4f571c487c253c5",
+  "_name_or_path": "CompVis/stable-diffusion-safety-checker",
+  "architectures": [
+    "StableDiffusionSafetyChecker"
+  ],
+  "initializer_factor": 1.0,
+  "logit_scale_init_value": 2.6592,
+  "model_type": "clip",
+  "projection_dim": 768,
+  "text_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": 49406,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": 49407,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "max_position_embeddings": 77,
+    "min_length": 0,
+    "model_type": "clip_text_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": 1,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.31.0",
+    "typical_p": 1.0,
+    "use_bfloat16": false,
+    "vocab_size": 49408
+  },
+  "torch_dtype": "float32",
+  "transformers_version": null,
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 1024,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 4096,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 16,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 24,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 14,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.31.0",
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  }
+}
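This config is the stock CompVis/stable-diffusion-safety-checker, a CLIP-based classifier; check_nsfw() in handler.py above feeds it CLIP-preprocessed pixel values. A minimal standalone sketch of that wiring, assuming the diffusers and transformers classes named in the diff; the file paths are illustrative:

# Standalone NSFW check mirroring handler.py's check_nsfw() (sketch, assumed paths).
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)

checker = StableDiffusionSafetyChecker.from_pretrained("realistic/safety_checker")
processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch16")

image = Image.open("sample.png").convert("RGB")  # illustrative input
inputs = processor(image, return_tensors="pt")

# The checker returns the (possibly blacked-out) images and one boolean per image.
_, has_nsfw_concept = checker(images=np.array(image), clip_input=inputs.pixel_values)
print(has_nsfw_concept)  # e.g. [False]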
realistic/safety_checker/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:753acd54aa6d288d6c0ce9d51468eb28f495fcbaacf0edf755fa5fc7ce678cd9
+size 1216062333
realistic/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,15 @@
+{
+  "_class_name": "PNDMScheduler",
+  "_diffusers_version": "0.20.0",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "set_alpha_to_one": false,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "timestep_spacing": "leading",
+  "trained_betas": null
+}
realistic/text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.31.0",
+  "vocab_size": 49408
+}
realistic/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:046e1b2cf3561e8ba31e060f94e92ec7b3c8a890bf3571933d9de1a82abda1f6
+size 492306077
realistic/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
realistic/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
realistic/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
realistic/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
realistic/unet/config.json ADDED
@@ -0,0 +1,65 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.20.0",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
realistic/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ef2d71d90c97c9d0c98a68b5e07b8a7b859f27753509d7cc1bd2e3949343a1c
+size 3438366373
realistic/vae/config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.20.0",
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "sample_size": 512,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
+}
realistic/vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b01618945554d9840701d3453d4a9fe3db0db090164a5ed6305641306285b6f
+size 334712113