# theimageconvert2 / entklei.py
from io import BytesIO
import io
import random
import requests
import string
import time
from PIL import Image, ImageFilter
import numpy as np
import torch
from dw_pose.main import dwpose
from scipy.ndimage import binary_dilation
from transformers import ViTFeatureExtractor, ViTForImageClassification
import torch.nn.functional as F
import transformers
from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
import os
import pydash as _
import boto3
age_detection_model = ViTForImageClassification.from_pretrained(
    'nateraw/vit-age-classifier')
age_detection_transforms = ViTFeatureExtractor.from_pretrained(
    'nateraw/vit-age-classifier')
REPLICATE_API_KEY = ""
S3_REGION = "fra1"
S3_ACCESS_ID = "0RN7BZXS59HYSBD3VB79"
S3_ACCESS_SECRET = "hfSPgBlWl5jsGHa2xuByVkSpancgVeA2CVQf2EMp"
S3_ENDPOINT_URL = "https://s3.solarcom.ch"
S3_BUCKET_NAME = "pissnelke"
s3_session = boto3.session.Session()
s3 = s3_session.client(
    service_name="s3",
    region_name=S3_REGION,
    aws_access_key_id=S3_ACCESS_ID,
    aws_secret_access_key=S3_ACCESS_SECRET,
    endpoint_url=S3_ENDPOINT_URL,
)
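# The S3 client above is configured but never called in this file. A typical use
# would be uploading a generated PIL image and returning its URL. The helper
# below is only an illustrative sketch: its name, key scheme, and the returned
# URL format are assumptions, not part of the original code.
def upload_pil_to_s3(pil_image, key_prefix="outputs"):
    # Serialize the image to an in-memory PNG buffer
    buffer = BytesIO()
    pil_image.save(buffer, format="PNG")
    buffer.seek(0)
    # Random object key under the given prefix (hypothetical naming scheme)
    key = f"{key_prefix}/{''.join(random.choices(string.ascii_lowercase, k=16))}.png"
    s3.upload_fileobj(
        buffer, S3_BUCKET_NAME, key,
        ExtraArgs={"ACL": "public-read", "ContentType": "image/png"})
    return f"{S3_ENDPOINT_URL}/{S3_BUCKET_NAME}/{key}"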
def find_bounding_box(pil_image):
    # Find the bounding box of the white (255) region in a mask image.
    # np.argwhere returns (row, col) pairs, i.e. (y, x).
    image_np = np.array(pil_image.convert('L'))
    white_pixels = np.argwhere(image_np == 255)
    y_min, x_min = np.min(white_pixels, axis=0)
    y_max, x_max = np.max(white_pixels, axis=0)
    # Return corners in (x, y) order: top-left, then bottom-right.
    return (x_min, y_min), (x_max, y_max)
def getSizeFromCoords(top_left, bottom_right):
    """
    Calculate the width and height of a bounding box.

    Parameters:
        top_left (tuple): The top-left corner (x_min, y_min).
        bottom_right (tuple): The bottom-right corner (x_max, y_max).

    Returns:
        dict: A dict with "width" and "height" keys.
    """
    (x_min, y_min), (x_max, y_max) = top_left, bottom_right
    width = x_max - x_min
    height = y_max - y_min
    return {"width": width, "height": height}
def crop_to_coords(coords1, coords2, pil_image):
    top_left_x, top_left_y = coords1
    bottom_right_x, bottom_right_y = coords2
    cropped_image = pil_image.crop(
        (top_left_x, top_left_y, bottom_right_x, bottom_right_y))
    return cropped_image
def paste_image_at_coords(dest_image, src_image, coords):
    dest_image.paste(src_image, coords)
    return dest_image
def resize(width, height, maxStretch):
    # Scale (width, height) so that the longer side equals maxStretch.
    new_width = width * (maxStretch / max(width, height))
    new_height = height * (maxStretch / max(width, height))
    return {"width": new_width, "height": new_height}
def get_is_underage(input_pil):
    input_pil = input_pil.convert("RGB")
    inputs = age_detection_transforms(input_pil, return_tensors='pt')
    with torch.no_grad():
        output = age_detection_model(**inputs)
    # Apply softmax to the logits to get probabilities
    probabilities = F.softmax(output['logits'], dim=1)
    # Get the class with the highest probability
    predicted_class = probabilities.argmax().item()
    # Class index -> age bracket (renamed to avoid shadowing the built-in `map`)
    age_map = {
        "0": "0-2",
        "1": "3-9",
        "2": "10-19",
        "3": "20-29",
        "4": "30-39",
        "5": "40-49",
        "6": "50-59",
        "7": "60-69",
        "8": "more than 70"
    }
    print("Age:", age_map[str(predicted_class)], "years old")
    # Classes 0, 1, and 2 cover ages below 20
    return predicted_class < 3
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16
)
base_pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "redstonehero/epicrealism_pureevolutionv5-inpainting", controlnet=controlnet, torch_dtype=torch.float16
)
base_pipe.scheduler = DDIMScheduler.from_config(base_pipe.scheduler.config)
base_pipe = base_pipe.to("cuda")
base_pipe.enable_model_cpu_offload()
base_pipe.safety_checker = None
base_pipe.enable_xformers_memory_efficient_attention()
pipe_with_tit_slider = _.clone_deep(base_pipe)
pipe_with_tit_slider.load_lora_weights(
    os.path.join(os.environ.get('path', "."), "models", "breastsizeslideroffset.safetensors"),
    weight_name="breastsizeslideroffset.safetensors",
    adapter_name="breastsizeslideroffset")
pipe_with_small_tits = _.clone_deep(pipe_with_tit_slider)
pipe_with_small_tits.set_adapters("breastsizeslideroffset", adapter_weights=[-0.8])
pipe_with_medium_tits = _.clone_deep(base_pipe)
pipe_with_big_tits = _.clone_deep(pipe_with_tit_slider)
pipe_with_big_tits.set_adapters("breastsizeslideroffset", adapter_weights=[0.7])
def get_nude(original_pil, original_max_size=2000, generate_max_size=768, positive_prompt="nude girl, pussy, tits", negative_prompt="ugly", steps=20, cfg_scale=7, get_mask_function=None, with_small_tits=False, with_big_tits=False):
    try:
        exif_data = original_pil._getexif()
        orientation_tag = 274  # The Exif tag for orientation
        if exif_data is not None and orientation_tag in exif_data:
            orientation = exif_data[orientation_tag]
            if orientation == 3:
                original_pil = original_pil.rotate(180, expand=True)
            elif orientation == 6:
                original_pil = original_pil.rotate(270, expand=True)
            elif orientation == 8:
                original_pil = original_pil.rotate(90, expand=True)
    except (AttributeError, KeyError, IndexError):
        # In case the Exif data is missing or corrupt, continue without rotating
        pass
    original_max_size = original_max_size or 2000
    generate_max_size = generate_max_size or 768
    positive_prompt = positive_prompt or "nude girl, pussy, tits"
    negative_prompt = negative_prompt or "ugly"
    steps = steps or 20
    cfg_scale = cfg_scale or 7
    small_original_image = original_pil.copy()
    small_original_image = small_original_image.convert("RGB")  # ensure RGB for the models below
    small_original_image.thumbnail((original_max_size, original_max_size))
    start_time = time.time()
    is_underage = get_is_underage(small_original_image)
    print("get_is_underage", time.time() - start_time, "seconds")
    if is_underage:
        raise Exception("Underage")
    person_mask_pil_expanded = get_mask_function(
        small_original_image, "person", expand_by=20)
    person_coords1, person_coords2 = find_bounding_box(
        person_mask_pil_expanded)
    size = getSizeFromCoords(person_coords1, person_coords2)
    there_height = size["height"]
    there_width = size["width"]
    # Determine if the image is portrait or landscape
    if there_height >= there_width:
        # Portrait
        there_height_to_width = there_width / there_height
        then_height = 768
        then_atleast_width = 768 * there_height_to_width
    else:
        # Landscape
        there_width_to_height = there_height / there_width
        then_width = 768
        then_atleast_height = 768 * there_width_to_height
    # Ensure dimensions are multiples of 8
    if there_height >= there_width:
        then_width = then_atleast_width - (then_atleast_width % 8) + 8
        crop_width = there_height * then_width / then_height
        crop_height = there_height
    else:
        then_height = then_atleast_height - (then_atleast_height % 8) + 8
        crop_height = there_width * then_height / then_width
        crop_width = there_width
    # Calculate cropping coordinates
    crop_coord_1 = (
        person_coords1[0] - (crop_width - size["width"]), person_coords1[1])
    crop_coord_2 = person_coords2
    if crop_coord_1[0] < 0:
        crop_coord_1 = person_coords1
        crop_coord_2 = (
            person_coords2[0] + (crop_width - size["width"]), person_coords2[1])
    person_cropped_pil = crop_to_coords(
        crop_coord_1, crop_coord_2, small_original_image)
    expanded_mask_image = get_mask_function(
        person_cropped_pil, "bra . blouse . skirt . dress", expand_by=10)
    person_cropped_width, person_cropped_height = person_cropped_pil.size
    new_size = resize(crop_width, crop_height, generate_max_size)
    dwpose_pil = dwpose(person_cropped_pil, 512)
    expanded_mask_image_width, expanded_mask_image_height = expanded_mask_image.size
    dwpose_pil_resized = dwpose_pil.resize(
        (int(expanded_mask_image_width), int(expanded_mask_image_height)))
    pipe = base_pipe
    if with_small_tits:
        pipe = pipe_with_small_tits
    if with_big_tits:
        pipe = pipe_with_big_tits
    end_result_images = pipe(
        positive_prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        eta=1.0,
        image=person_cropped_pil,
        mask_image=expanded_mask_image,
        control_image=dwpose_pil_resized,
        num_images_per_prompt=2,
        height=round(new_size["height"]),
        width=round(new_size["width"])
    ).images

    # Function to create a mask for blurring edges
    def create_blurred_edge_mask(image, blur_radius):
        mask = Image.new("L", image.size, 0)
        mask.paste(255, [blur_radius, blur_radius, mask.width -
                         blur_radius, mask.height - blur_radius])
        return mask.filter(ImageFilter.GaussianBlur(blur_radius))
    output_pils = []
    for image in end_result_images:
        fit_into_group_image = image.resize(
            (person_cropped_width, person_cropped_height))
        # Create a mask for the resized image with blurred edges
        blur_radius = 10  # You can adjust the radius as needed
        mask = create_blurred_edge_mask(fit_into_group_image, blur_radius)
        # Paste onto a copy so each generated image yields its own output,
        # using the mask for a smoother transition at the crop edges
        result_image = small_original_image.copy()
        result_image.paste(
            fit_into_group_image, (int(crop_coord_1[0]), int(crop_coord_1[1])), mask)
        output_pils.append(result_image)
    return output_pils
# get all files in ./dataset and get nude
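# A minimal sketch of the batch loop described above, under the assumption that
# the caller supplies a mask helper compatible with get_nude's get_mask_function
# parameter (a callable taking an image, a text prompt, and expand_by). No such
# helper is defined in this file, so the function below is illustrative rather
# than part of the original pipeline.
def process_dataset(mask_function, dataset_dir="./dataset", output_dir="./output"):
    os.makedirs(output_dir, exist_ok=True)
    for filename in os.listdir(dataset_dir):
        # Only process common image formats
        if not filename.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
            continue
        source = Image.open(os.path.join(dataset_dir, filename))
        results = get_nude(source, get_mask_function=mask_function)
        # get_nude returns two candidate images; save both
        for i, result in enumerate(results):
            result.save(os.path.join(output_dir, f"{i}_{filename}"))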