{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Applied providers: ['CUDAExecutionProvider', 'CPUExecutionProvider'], with options: {'CUDAExecutionProvider': {'cudnn_conv_algo_search': 'EXHAUSTIVE', 'device_id': '0', 'cudnn_conv1d_pad_to_nc1d': '0', 'has_user_compute_stream': '0', 'gpu_external_alloc': '0', 'enable_cuda_graph': '0', 'gpu_mem_limit': '18446744073709551615', 'gpu_external_free': '0', 'gpu_external_empty_cache': '0', 'arena_extend_strategy': 'kNextPowerOfTwo', 'do_copy_in_default_stream': '1', 'cudnn_conv_use_max_workspace': '1', 'tunable_op_enable': '0', 'tunable_op_tuning_enable': '0', 'tunable_op_max_tuning_duration_ms': '0', 'enable_skip_layer_norm_strict_mode': '0'}, 'CPUExecutionProvider': {}}\n", "find model: C:\\Users\\doron/.insightface\\models\\buffalo_l\\1k3d68.onnx landmark_3d_68 ['None', 3, 192, 192] 0.0 1.0\n", "Applied providers: ['CUDAExecutionProvider', 'CPUExecutionProvider'], with options: {'CUDAExecutionProvider': {'cudnn_conv_algo_search': 'EXHAUSTIVE', 'device_id': '0', 'cudnn_conv1d_pad_to_nc1d': '0', 'has_user_compute_stream': '0', 'gpu_external_alloc': '0', 'enable_cuda_graph': '0', 'gpu_mem_limit': '18446744073709551615', 'gpu_external_free': '0', 'gpu_external_empty_cache': '0', 'arena_extend_strategy': 'kNextPowerOfTwo', 'do_copy_in_default_stream': '1', 'cudnn_conv_use_max_workspace': '1', 'tunable_op_enable': '0', 'tunable_op_tuning_enable': '0', 'tunable_op_max_tuning_duration_ms': '0', 'enable_skip_layer_norm_strict_mode': '0'}, 'CPUExecutionProvider': {}}\n", "find model: C:\\Users\\doron/.insightface\\models\\buffalo_l\\2d106det.onnx landmark_2d_106 ['None', 3, 192, 192] 0.0 1.0\n", "Applied providers: ['CUDAExecutionProvider', 'CPUExecutionProvider'], with options: {'CUDAExecutionProvider': {'cudnn_conv_algo_search': 'EXHAUSTIVE', 'device_id': '0', 'cudnn_conv1d_pad_to_nc1d': '0', 'has_user_compute_stream': '0', 'gpu_external_alloc': '0', 'enable_cuda_graph': '0', 'gpu_mem_limit': '18446744073709551615', 'gpu_external_free': '0', 'gpu_external_empty_cache': '0', 'arena_extend_strategy': 'kNextPowerOfTwo', 'do_copy_in_default_stream': '1', 'cudnn_conv_use_max_workspace': '1', 'tunable_op_enable': '0', 'tunable_op_tuning_enable': '0', 'tunable_op_max_tuning_duration_ms': '0', 'enable_skip_layer_norm_strict_mode': '0'}, 'CPUExecutionProvider': {}}\n", "find model: C:\\Users\\doron/.insightface\\models\\buffalo_l\\det_10g.onnx detection [1, 3, '?', '?'] 127.5 128.0\n", "Applied providers: ['CUDAExecutionProvider', 'CPUExecutionProvider'], with options: {'CUDAExecutionProvider': {'cudnn_conv_algo_search': 'EXHAUSTIVE', 'device_id': '0', 'cudnn_conv1d_pad_to_nc1d': '0', 'has_user_compute_stream': '0', 'gpu_external_alloc': '0', 'enable_cuda_graph': '0', 'gpu_mem_limit': '18446744073709551615', 'gpu_external_free': '0', 'gpu_external_empty_cache': '0', 'arena_extend_strategy': 'kNextPowerOfTwo', 'do_copy_in_default_stream': '1', 'cudnn_conv_use_max_workspace': '1', 'tunable_op_enable': '0', 'tunable_op_tuning_enable': '0', 'tunable_op_max_tuning_duration_ms': '0', 'enable_skip_layer_norm_strict_mode': '0'}, 'CPUExecutionProvider': {}}\n", "find model: C:\\Users\\doron/.insightface\\models\\buffalo_l\\genderage.onnx genderage ['None', 3, 96, 96] 0.0 1.0\n", "Applied providers: ['CUDAExecutionProvider', 'CPUExecutionProvider'], with options: {'CUDAExecutionProvider': {'cudnn_conv_algo_search': 'EXHAUSTIVE', 'device_id': '0', 'cudnn_conv1d_pad_to_nc1d': '0', 
'has_user_compute_stream': '0', 'gpu_external_alloc': '0', 'enable_cuda_graph': '0', 'gpu_mem_limit': '18446744073709551615', 'gpu_external_free': '0', 'gpu_external_empty_cache': '0', 'arena_extend_strategy': 'kNextPowerOfTwo', 'do_copy_in_default_stream': '1', 'cudnn_conv_use_max_workspace': '1', 'tunable_op_enable': '0', 'tunable_op_tuning_enable': '0', 'tunable_op_max_tuning_duration_ms': '0', 'enable_skip_layer_norm_strict_mode': '0'}, 'CPUExecutionProvider': {}}\n", "find model: C:\\Users\\doron/.insightface\\models\\buffalo_l\\w600k_r50.onnx recognition ['None', 3, 112, 112] 127.5 127.5\n", "set det-size: (640, 640)\n" ] } ], "source": [ "import torch\n", "from diffusers.utils import load_image\n", "from PIL import Image\n", "from insightface.app import FaceAnalysis\n", "from insightface.utils import face_align\n", "\n", "\n", "\n", "app = FaceAnalysis(name=\"buffalo_l\", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])\n", "app.prepare(ctx_id=0, det_size=(640, 640))\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "#!pip install git+https://github.com/tencent-ailab/IP-Adapter.git" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import cv2\n", "import dlib\n", "import numpy as np\n", "from PIL import Image, ImageOps\n", "\n", "MODEL_PATH = \"shape_predictor_5_face_landmarks.dat\" # You need to download this file from http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n", "detector = dlib.get_frontal_face_detector() # Initialize dlib's face detector model\n", "\n", "def get_face_landmarks(image_path):\n", " # Load the image\n", " image = cv2.imread(image_path)\n", " try:\n", " image = ImageOps.exif_transpose(image)\n", " except:\n", " print(\"exif problem, not rotating\")\n", "\n", " gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n", "\n", " # Initialize dlib's facial landmarks predictor\n", " predictor = dlib.shape_predictor(\"shape_predictor_5_face_landmarks.dat\") \n", "\n", " # Detect faces in the image\n", " faces = detector(gray)\n", "\n", " if len(faces) > 0:\n", " # Assume the first face is the target, you can modify this based on your requirements\n", " shape = predictor(gray, faces[0])\n", " landmarks = np.array([[p.x, p.y] for p in shape.parts()])\n", " return landmarks\n", " else:\n", " return None\n", "\n", "def calculate_roll_and_yaw(landmarks):\n", " # Calculate the roll angle using the angle between the eyes\n", " roll_angle = np.degrees(np.arctan2(landmarks[1, 1] - landmarks[0, 1], landmarks[1, 0] - landmarks[0, 0]))\n", "\n", " # Calculate the yaw angle using the angle between the eyes and the tip of the nose\n", " yaw_angle = np.degrees(np.arctan2(landmarks[1, 1] - landmarks[2, 1], landmarks[1, 0] - landmarks[2, 0]))\n", "\n", " return roll_angle, yaw_angle\n", "\n", "def detect_and_crop_head(input_image_path, factor=3.0):\n", " # Get facial landmarks\n", " landmarks = get_face_landmarks(input_image_path)\n", "\n", " if landmarks is not None:\n", " # Calculate the center of the face using the mean of the landmarks\n", " center_x = int(np.mean(landmarks[:, 0]))\n", " center_y = int(np.mean(landmarks[:, 1]))\n", "\n", " # Calculate the size of the cropped region\n", " size = int(max(np.max(landmarks[:, 0]) - np.min(landmarks[:, 0]),\n", " np.max(landmarks[:, 1]) - np.min(landmarks[:, 1])) * factor)\n", "\n", " # Calculate the new coordinates for a 1:1 aspect ratio\n", " x_new = max(0, center_x - size // 2)\n", " y_new = max(0, center_y - size // 2)\n", 
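"\n",
"        # The crop center is shifted according to the head pose (yaw/roll) estimated below;\n",
"        # the 0.4 and 0.2 scale factors are empirically chosen constants.\n",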
"\n", " # Calculate roll and yaw angles\n", " roll_angle, yaw_angle = calculate_roll_and_yaw(landmarks)\n", "\n", " # Adjust the center coordinates based on the yaw and roll angles\n", " shift_x = int(size * 0.4 * np.sin(np.radians(yaw_angle)))\n", " shift_y = int(size * 0.2 * np.sin(np.radians(roll_angle)))\n", "\n", " #print(f'Roll angle: {roll_angle:.2f}, Yaw angle: {yaw_angle:.2f} shift_x: {shift_x}, shift_y: {shift_y}')\n", "\n", " center_x += shift_x\n", " center_y += shift_y\n", "\n", " # Calculate the new coordinates for a 1:1 aspect ratio\n", " x_new = max(0, center_x - size // 2)\n", " y_new = max(0, center_y - size // 2)\n", "\n", " # Read the input image using PIL\n", " image = Image.open(input_image_path)\n", "\n", " # Crop the head region with a 1:1 aspect ratio\n", " cropped_head = np.array(image.crop((x_new, y_new, x_new + size, y_new + size)))\n", "\n", " # Convert the cropped head back to PIL format\n", " cropped_head_pil = Image.fromarray(cropped_head)\n", "\n", " # Return the cropped head image\n", " return cropped_head_pil\n", " else:\n", " return None" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "from diffusers.utils import load_image\n", "from PIL import Image\n", "\n", "#image_file = \"./assets/images/darth-vader.jpg\"\n", "#image_file = \"./assets/images/vermeer-crop.jpg\"\n", "#image_file = \"./assets/images/vermeer.jpg\"\n", "\n", "#image_file1 = \"./assets/images/gal-gadot.jpg\"\n", "#image_file2 = \"./assets/images/Margot-robbie-barbiecore-outfits-lede.jpg\"\n", "\n", "image_file1 = \"./images/gal-gadot.jpg\"\n", "image_file2 = \"./images/margot-robbie.jpg\"\n", "\n", "\n", "#image = load_image(image_file)\n", "#image.resize((256, 256))" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\doron\\anaconda3\\envs\\sdxl\\lib\\site-packages\\insightface\\utils\\transform.py:68: FutureWarning: `rcond` parameter will change to the default of machine precision times ``max(M, N)`` where M and N are the input matrix dimensions.\n", "To use the future default and silence this warning we advise to pass `rcond=None`, to keep using the old, explicitly pass `rcond=-1`.\n", " P = np.linalg.lstsq(X_homo, Y)[0].T # Affine matrix. 
3 x 4\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "1\n", "1\n", "exif problem, not rotating\n", "exif problem, not rotating\n" ] } ], "source": [ "import cv2\n", "image1 = cv2.imread(image_file1)\n", "image2 = cv2.imread(image_file2)\n", "\n", "faces1 = app.get(image1)\n", "print(len(faces1))\n", "faceid_embeds1 = torch.from_numpy(faces1[0].normed_embedding).unsqueeze(0)\n", "faces2 = app.get(image2)\n", "print(len(faces2))\n", "faceid_embeds2 = torch.from_numpy(faces2[0].normed_embedding).unsqueeze(0)\n", "\n", "# interpolate between the two faces\n", "faceid_embeds = torch.lerp(faceid_embeds1, faceid_embeds2, 0.5)\n", "\n", "#image1 = face_align.norm_crop(image1, landmark=faces1[0].kps, image_size=224)\n", "#image2 = face_align.norm_crop(image2, landmark=faces2[0].kps, image_size=224)\n", "\n", "img1 = detect_and_crop_head(image_file1, factor=2.5)\n", "img2 = detect_and_crop_head(image_file2, factor=2.5)\n", "\n", "image1 = cv2.cvtColor(np.array(img1.resize((224,224))), cv2.COLOR_RGB2BGR)\n", "image2 = cv2.cvtColor(np.array(img2.resize((224,224))), cv2.COLOR_RGB2BGR)\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "#faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)\n", "\n", "#faceid_embeds.shape" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "bin c:\\Users\\doron\\anaconda3\\envs\\sdxl\\lib\\site-packages\\bitsandbytes\\libbitsandbytes_cuda118.dll\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "A matching Triton is not available, some optimizations will not be enabled.\n", "Error caught was: No module named 'triton'\n" ] } ], "source": [ "from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL\n", "from huggingface_hub import hf_hub_download\n", "from PIL import Image\n", "\n", "#from ip_adapter.ip_adapter_faceid import IPAdapterFaceID\n", "from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDPlus\n", "\n", "base_model_path = \"SG161222/Realistic_Vision_V5.1_noVAE\"\n", "#base_model_path = \"dreamlike-art/dreamlike-anime-1.0\"\n", "vae_model_path = \"stabilityai/sd-vae-ft-mse\"\n", "ip_ckpt = hf_hub_download(repo_id=\"h94/IP-Adapter-FaceID\", filename=\"ip-adapter-faceid-plusv2_sd15.bin\", repo_type=\"model\")\n", "image_encoder_path = \"laion/CLIP-ViT-H-14-laion2B-s32B-b79K\"\n", "device = \"cuda\"" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def image_grid(imgs, rows, cols):\n", " assert len(imgs) == rows*cols\n", "\n", " w, h = imgs[0].size\n", " grid = Image.new('RGB', size=(cols*w, rows*h))\n", " grid_w, grid_h = grid.size\n", " \n", " for i, img in enumerate(imgs):\n", " grid.paste(img, box=(i%cols*w, i//cols*h))\n", " return grid\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "067842f0cf5d4cd189c38967a7d6d097", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Loading pipeline components...: 0%| | 0/5 [00:00" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "print(image.shape)\n", "img1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)\n", "img2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)\n", "img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", "pil_images = [Image.fromarray(img1), Image.fromarray(img), Image.fromarray(img2)]\n", "grid = image_grid(pil_images, 1, 3)\n", 
"grid" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "826a8bc66436446aa6a4c11fdfe3d860", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/30 [00:00" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "v2=True\n", "\n", "images = ip_model.generate(\n", " prompt=prompt, negative_prompt=negative_prompt, face_image=image1, faceid_embeds=faceid_embeds1, shortcut=v2, scale=0.9, s_scale=2.5, num_samples=4, width=512, height=768, num_inference_steps=30, seed=2023\n", ")\n", "\n", "grid = image_grid(images, 2, 2)\n", "grid" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "43fd656cbd8c404e8f9b4b438f6cec03", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/30 [00:00" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "v2=True\n", "\n", "images = ip_model.generate(\n", " prompt=prompt, negative_prompt=negative_prompt, face_image=image2, faceid_embeds=faceid_embeds2, shortcut=v2, scale=0.9, s_scale=2.5, num_samples=4, width=512, height=768, num_inference_steps=30, seed=2023\n", ")\n", "\n", "grid = image_grid(images, 2, 2)\n", "grid" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "58e6ef6e4644463f9bf3aa946747b6f4", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/30 [00:00" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "v2=True\n", "\n", "images = ip_model.generate(\n", " prompt=prompt, negative_prompt=negative_prompt, face_image=image, faceid_embeds=faceid_embeds, shortcut=v2, scale=0.9, s_scale=2.5, num_samples=4, width=512, height=768, num_inference_steps=30, seed=2023\n", ")\n", "\n", "grid = image_grid(images, 2, 2)\n", "grid" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|██████████| 5/5 [00:51<00:00, 10.23s/it]\n" ] } ], "source": [ "from pathlib import Path\n", "from tqdm import tqdm\n", "import numpy as np\n", "import os\n", "from IPython.display import clear_output\n", "\n", "seed = 2023\n", "scale=0.9\n", "width = 512\n", "height = 768\n", "steps=30\n", "num_of_results=5\n", "v2=True\n", "output_file_prefix=\"gal-margot--lerp-v2-scale2-shortcutTrue-customCrop\" + prompt.replace(\" \", \"_\")\n", "os.makedirs(output_file_prefix, exist_ok=True)\n", "\n", "#automatically prepare a list t of values between 0 and 1\n", "t_space = torch.linspace(0, 1, num_of_results)\n", "print(t_space[0])\n", "\n", "idx = 0\n", "for t in tqdm(t_space):\n", " print(f\"t = {t}\")\n", " mix_factor = t.item()\n", " image = (image1 * (1 - mix_factor) + image2 * mix_factor).astype(np.uint8)\n", " # interpolate between the two faces\n", " faceid_embeds = torch.lerp(faceid_embeds1, faceid_embeds2, t)\n", " images = ip_model.generate(prompt=prompt, negative_prompt=negative_prompt, face_image=image, faceid_embeds=faceid_embeds, shortcut=v2, s_scale=2.5, num_samples=4, scale=scale, width=width, height=height, num_inference_steps=30, seed=seed)\n", " grid = image_grid(images, 2, 2)\n", " grid.save(f\"{output_file_prefix}/output-{seed}_{steps}_{int(scale * 100)}--{str(idx).zfill(4)}.jpg\")\n", " idx += 1\n", " clear_output(wait=True)" ] } ], 
"metadata": { "kernelspec": { "display_name": "sdxl", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.9" } }, "nbformat": 4, "nbformat_minor": 2 }