{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": { "provenance": [], "gpuType": "T4" },
    "kernelspec": { "name": "python3", "display_name": "Python 3" },
    "language_info": { "name": "python" },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "RXcT0H3RYk9j" },
      "outputs": [],
      "source": [
        "# Clone the Moore-AnimateAnyone repository.\n",
        "!git clone https://github.com/MooreThreads/Moore-AnimateAnyone.git"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "Yi_MgKanYq--" },
      "outputs": [],
      "source": [
        "%cd /content/Moore-AnimateAnyone\n",
        "# %pip (rather than !pip) installs into the notebook kernel's environment.\n",
        "%pip install -r requirements.txt\n",
        "%pip install https://github.com/karaokenerds/python-audio-separator/releases/download/v0.12.1/onnxruntime_gpu-1.17.0-cp310-cp310-linux_x86_64.whl"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "KQkefoBbbZ0f" },
      "outputs": [],
      "source": [
        "# mkdir -p creates the parent directory too (the previous version commented\n",
        "# out the parent mkdir, so these commands failed on a fresh runtime).\n",
        "# Names match the paths used by the inference config below:\n",
        "# pretrained_weights/ (with underscore) and DWPose (capital P).\n",
        "!mkdir -p /content/Moore-AnimateAnyone/pretrained_weights/DWPose\n",
        "!mkdir -p /content/Moore-AnimateAnyone/pretrained_weights/image_encoder\n",
        "\n",
        "print(\"done\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "KF4KUup3aXNG" },
      "outputs": [],
      "source": [
        "!git lfs install\n",
        "!git clone https://huggingface.co/patrolli/AnimateAnyone\n",
        "# Copy the released checkpoints to where the inference config expects them\n",
        "# (./pretrained_weights/*.pth) — previously they were cloned and never moved.\n",
        "!cp /content/Moore-AnimateAnyone/AnimateAnyone/*.pth /content/Moore-AnimateAnyone/pretrained_weights/"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "JAnAFo-GaKvH" },
      "outputs": [],
      "source": [
        "!apt -y install -qq aria2\n",
        "# Base SD 1.5 weights, downloaded into pretrained_weights/ so the paths in\n",
        "# the inference config resolve.\n",
        "BaseModelUrl = \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
        "BaseModelDir = \"/content/Moore-AnimateAnyone/pretrained_weights/stable-diffusion-v1-5\"\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/model_index.json -d {BaseModelDir} -o model_index.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.bin -d {BaseModelDir}/vae -o diffusion_pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/vae/config.json -d {BaseModelDir}/vae -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.bin -d {BaseModelDir}/unet -o diffusion_pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/unet/config.json -d {BaseModelDir}/unet -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/vocab.json -d {BaseModelDir}/tokenizer -o vocab.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/tokenizer_config.json -d {BaseModelDir}/tokenizer -o tokenizer_config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/special_tokens_map.json -d {BaseModelDir}/tokenizer -o special_tokens_map.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/merges.txt -d {BaseModelDir}/tokenizer -o merges.txt\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/text_encoder/pytorch_model.bin -d {BaseModelDir}/text_encoder -o pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/text_encoder/config.json -d {BaseModelDir}/text_encoder -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/scheduler/scheduler_config.json -d {BaseModelDir}/scheduler -o scheduler_config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/safety_checker/pytorch_model.bin -d {BaseModelDir}/safety_checker -o pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/safety_checker/config.json -d {BaseModelDir}/safety_checker -o config.json\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/feature_extractor/preprocessor_config.json -d {BaseModelDir}/feature_extractor -o preprocessor_config.json\n",
        "\n",
        "# The inference config also references sd-vae-ft-mse and image_encoder;\n",
        "# previously nothing was downloaded into them (image_encoder stayed empty).\n",
        "VaeUrl = \"https://huggingface.co/stabilityai/sd-vae-ft-mse\"\n",
        "VaeDir = \"/content/Moore-AnimateAnyone/pretrained_weights/sd-vae-ft-mse\"\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {VaeUrl}/resolve/main/diffusion_pytorch_model.bin -d {VaeDir} -o diffusion_pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {VaeUrl}/raw/main/config.json -d {VaeDir} -o config.json\n",
        "ImgEncUrl = \"https://huggingface.co/lambdalabs/sd-image-variations-diffusers\"\n",
        "ImgEncDir = \"/content/Moore-AnimateAnyone/pretrained_weights/image_encoder\"\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {ImgEncUrl}/resolve/main/image_encoder/pytorch_model.bin -d {ImgEncDir} -o pytorch_model.bin\n",
        "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {ImgEncUrl}/raw/main/image_encoder/config.json -d {ImgEncDir} -o config.json"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "y4KuiWsvc5NF" },
      "outputs": [],
      "source": [
        "%cd /content/Moore-AnimateAnyone/pretrained_weights/DWPose\n",
        "!wget https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx #yolox\n",
        "!wget https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx #dwpose"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "EZZvbXF3ewhF" },
      "outputs": [],
      "source": [
        "#@title Inference config — %%writefile writes it to the repo's yaml directly\n",
        "# NOTE: previously this cell held bare YAML in a code cell, which raised a\n",
        "# SyntaxError when run. %%writefile performs the \"copy and paste into the\n",
        "# original yaml file\" step automatically. test_cases entries are now properly\n",
        "# nested (key indented under test_cases, list items under the key).\n",
        "%%writefile /content/Moore-AnimateAnyone/configs/prompts/animation.yaml\n",
        "pretrained_base_model_path: \"./pretrained_weights/stable-diffusion-v1-5/\"\n",
        "pretrained_vae_path: \"./pretrained_weights/sd-vae-ft-mse\"\n",
        "image_encoder_path: \"./pretrained_weights/image_encoder\"\n",
        "denoising_unet_path: \"./pretrained_weights/denoising_unet.pth\"\n",
        "reference_unet_path: \"./pretrained_weights/reference_unet.pth\"\n",
        "pose_guider_path: \"./pretrained_weights/pose_guider.pth\"\n",
        "motion_module_path: \"./pretrained_weights/motion_module.pth\"\n",
        "\n",
        "inference_config: \"./configs/inference/inference_v2.yaml\"\n",
        "weight_dtype: 'fp16'\n",
        "\n",
        "test_cases:\n",
        "  \"./configs/inference/ref_images/anyone-2.png\":\n",
        "    - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
        "    - \"./configs/inference/pose_videos/anyone-video-5_kps.mp4\"\n",
        "  \"./configs/inference/ref_images/anyone-10.png\":\n",
        "    - \"./configs/inference/pose_videos/anyone-video-1_kps.mp4\"\n",
        "    - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
        "  \"./configs/inference/ref_images/anyone-11.png\":\n",
        "    - \"./configs/inference/pose_videos/anyone-video-1_kps.mp4\"\n",
        "    - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
        "  \"./configs/inference/ref_images/anyone-3.png\":\n",
        "    - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
        "    - \"./configs/inference/pose_videos/anyone-video-5_kps.mp4\"\n",
        "  \"./configs/inference/ref_images/anyone-5.png\":\n",
        "    - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": { "id": "z_qg7x4yZ_CN" },
      "outputs": [],
      "source": [
        "# Return to the repo root first: the previous cell left the cwd in\n",
        "# pretrained_weights/DWPose, which would break the config's relative paths.\n",
        "%cd /content/Moore-AnimateAnyone\n",
        "!python app.py"
      ]
    }
  ]
}