{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "id": "5rbnEz_FTJKL" }, "outputs": [], "source": [ "\n", "######################################################################\n", "!pip install -q --upgrade diffusers\n", "!pip install -q --upgrade transformers\n", "!pip install -q --upgrade tokenizers\n", "######################################################################\n", "!pip install -q --upgrade peft\n", "######################################################################\n", "#!pip install diffusers==0.21.1 transformers==4.33.2 tokenizers==0.13.3\n", "######################################################################\n", "\n", "!pip install -q accelerate\n", "!pip install -q safetensors\n", "!pip install -q einops\n", "!pip install -q onnxruntime-gpu\n", "!pip install -q dlib\n", "!pip install -q opencv-python\n", "!pip install -q git+https://github.com/tencent-ailab/IP-Adapter.git" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "hThiB6kxTJKM" }, "outputs": [], "source": [ "import torch\n", "from diffusers.utils import load_image\n", "from PIL import Image\n", "import os\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "RB9wDFckTJKN" }, "outputs": [], "source": [ "\n", "os.makedirs(\"images\", exist_ok=True)\n", "!wget --continue https://upload.wikimedia.org/wikipedia/commons/thumb/0/0f/1665_Girl_with_a_Pearl_Earring.jpg/800px-1665_Girl_with_a_Pearl_Earring.jpg -O images/Girl_with_a_Pearl_Earring.jpg\n", "\n", "!wget --continue https://huggingface.co/datasets/Norod78/ip-adapter-face-full-test/resolve/main/shape_predictor_5_face_landmarks.dat?download=true -O shape_predictor_5_face_landmarks.dat\n", "\n", "!wget --continue https://huggingface.co/datasets/Norod78/ip-adapter-face-full-test/raw/main/crop_head_dlib5.py -O crop_head_dlib5.py" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "kfvMkQD6TJKP" }, "outputs": [], "source": [ "import torch\n", "from diffusers import StableDiffusionXLPipeline, DDIMScheduler, AutoencoderKL\n", "from huggingface_hub import hf_hub_download\n", "from diffusers.utils import load_image\n", "from ip_adapter import IPAdapterFull\n", "from PIL import Image\n", "\n", "def image_grid(imgs, rows, cols):\n", " assert len(imgs) == rows*cols\n", "\n", " w, h = imgs[0].size\n", " grid = Image.new('RGB', size=(cols*w, rows*h))\n", " grid_w, grid_h = grid.size\n", "\n", " for i, img in enumerate(imgs):\n", " grid.paste(img, box=(i%cols*w, i//cols*h))\n", " return grid\n", "\n", "noise_scheduler = DDIMScheduler(\n", " num_train_timesteps=1000,\n", " beta_start=0.00085,\n", " beta_end=0.012,\n", " beta_schedule=\"scaled_linear\",\n", " clip_sample=False,\n", " set_alpha_to_one=False,\n", " steps_offset=1\n", ")\n", "\n", "base_model_path = \"stabilityai/stable-diffusion-xl-base-1.0\"\n", "vae_model_path = \"madebyollin/sdxl-vae-fp16-fix\"\n", "ip_ckpt = hf_hub_download(repo_id=\"h94/IP-Adapter\", subfolder=\"sdxl_models\", filename=\"ip-adapter-plus-face_sdxl_vit-h.safetensors\", repo_type=\"model\")\n", "image_encoder_path = \"laion/CLIP-ViT-H-14-laion2B-s32B-b79K\"\n", "device = \"cuda\"\n", "\n", "with torch.no_grad():\n", " print(\"Loading vae\")\n", " vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)\n", " print(\"Loading pipeline\")\n", " pipe = StableDiffusionXLPipeline.from_pretrained(\n", " base_model_path,\n", " variant=\"fp16\",\n", " torch_dtype=torch.float16,\n", " scheduler=noise_scheduler,\n", " vae=vae,\n", " feature_extractor=None,\n", " safety_checker=None\n", " )\n", "print(\"Done\")\n", "\n" ] }, { "cell_type": "code", "source": [ "\n", "#lora_weights = hf_hub_download(repo_id=\"Norod78/SDXL-YarnArtStyle-LoRA\", filename=\"SDXL_Yarn_Art_Style.safetensors\", repo_type=\"model\")\n", "lora_weights = hf_hub_download(repo_id=\"Norod78/SDXL-Psychemelt-style-LoRA\" , filename=\"SDXL_Psychemelt_style_LoRA-000007.safetensors\", repo_type=\"model\")\n", "\n", "############################################################################################################################################\n", "pipe.load_lora_weights(lora_weights)\n", "############################################################################################################################################\n" ], "metadata": { "id": "byQMEGplF0FD" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "ksM5XrlmTJKQ" }, "outputs": [], "source": [ "from ip_adapter import IPAdapterPlusXL\n", "# load ip-adapter\n", "with torch.no_grad():\n", " ip_model = IPAdapterPlusXL(pipe, image_encoder_path, ip_ckpt, device, num_tokens=16)\n" ] }, { "cell_type": "code", "source": [ "import crop_head_dlib5\n", "# Detect and crop the head\n", "cropped_head = crop_head_dlib5.detect_and_crop_head(\"images/Girl_with_a_Pearl_Earring.jpg\", factor=4.5)\n", "image = cropped_head.resize((224, 224))\n", "image" ], "metadata": { "id": "2xz5rU5OIA-R" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "xGjbgEdBTJKQ" }, "outputs": [], "source": [ "prompt = \"full body photo of a woman standing outdoors in a field, wearing a colorful dress in an LSD trip psychemelt style\"\n", "negative_prompt = \"monochrome, lowres, bad anatomy, worst quality, low quality, blurry, extra limbs, nude, naked, nsfw\"\n", "with torch.no_grad():\n", " images = ip_model.generate(\n", " prompt=prompt, negative_prompt=negative_prompt, pil_image=image, scale=0.85, guidance_scale=7.5, num_samples=4, width=1024, height=1280, num_inference_steps=30, seed=2024,\n", " )\n", "\n", "grid = image_grid(images, 2, 2)\n", "grid\n" ] }, { "cell_type": "code", "source": [ "!wget --continue https://upload.wikimedia.org/wikipedia/commons/0/0f/SDCC_2015_-_Gal_Gadot_%2819742671671%29_%28cropped%29.jpg -O images/gal_gadot_full_body.jpg\n", "cropped_head = crop_head_dlib5.detect_and_crop_head(\"images/gal_gadot_full_body.jpg\", factor=2.5)\n", "image = cropped_head.resize((224, 224))\n", "image" ], "metadata": { "id": "RePEtucoHcyS" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "\n", "with torch.no_grad():\n", " images = ip_model.generate(\n", " prompt=prompt, negative_prompt=negative_prompt, pil_image=image, scale=0.85, guidance_scale=7.5, num_samples=4, width=1024, height=1280, num_inference_steps=30, seed=2024,\n", " )\n", "\n", "grid = image_grid(images, 2, 2)\n", "grid" ], "metadata": { "id": "Ic2r3eJoHn6B" }, "execution_count": null, "outputs": [] } ], "metadata": { "accelerator": "GPU", "colab": { "provenance": [], "machine_shape": "hm", "gpuType": "L4", "private_outputs": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.5" } }, "nbformat": 4, "nbformat_minor": 0 }