{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4"
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "code",
"source": [
"!rm -rf /content/sample_data\n",
"!wget https://huggingface.co/waveydaveygravy/Moore-AnimateAnyone/resolve/main/Moore-AnimateAnyone.zip\n",
"!unzip Moore-AnimateAnyone.zip\n",
"%cd /content/Moore-AnimateAnyone\n",
"!pip install -r requirements.txt\n",
"print(\"restart session once requirements installed\")\n",
"\n",
"\n"
],
"metadata": {
"id": "SGWqEghrOl9j"
},
"execution_count": null,
"outputs": []
},
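{
"cell_type": "code",
"source": [
"#@title optional: sanity check (run after restarting the session)\n",
"# A quick check, not part of the original notebook: confirm the GPU is\n",
"# visible and that the repo unpacked where the later cells expect it.\n",
"!nvidia-smi\n",
"import os\n",
"print(\"repo present:\", os.path.isdir(\"/content/Moore-AnimateAnyone\"))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},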
{
"cell_type": "code",
"source": [
"!apt -y install -qq aria2\n",
"\n",
"BaseModelUrl = \"https://huggingface.co/runwayml/stable-diffusion-v1-5\"\n",
"BaseModelDir = \"/content/Moore-AnimateAnyone/pretrainedweights/stable-diffusion-v1-5\"\n",
"\n",
"# Create the target directory and necessary subdirectories\n",
"!mkdir -p {BaseModelDir} {BaseModelDir}/vae {BaseModelDir}/unet {BaseModelDir}/tokenizer {BaseModelDir}/text_encoder {BaseModelDir}/scheduler {BaseModelDir}/safety_checker {BaseModelDir}/feature_extractor\n",
"\n",
"# Clone all model components using aria2c, specifying the correct output directories\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl} -d {BaseModelDir}\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/model_index.json -d {BaseModelDir} -o model_index.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/vae/diffusion_pytorch_model.bin -d {BaseModelDir}/vae -o diffusion_pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/vae/config.json -d {BaseModelDir}/vae -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/unet/diffusion_pytorch_model.bin -d {BaseModelDir}/unet -o diffusion_pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/unet/config.json -d {BaseModelDir}/unet -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/vocab.json -d {BaseModelDir}/tokenizer -o vocab.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/tokenizer_config.json -d {BaseModelDir}/tokenizer -o tokenizer_config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/special_tokens_map.json -d {BaseModelDir}/tokenizer -o special_tokens_map.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/tokenizer/merges.txt -d {BaseModelDir}/tokenizer -o merges.txt\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/text_encoder/pytorch_model.bin -d {BaseModelDir}/text_encoder -o pytorch_model.bin\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/text_encoder/config.json -d {BaseModelDir}/text_encoder -o config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/raw/main/scheduler/scheduler_config.json -d {BaseModelDir}/scheduler -o scheduler_config.json\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {BaseModelUrl}/resolve/main/safety_checker/pytorch_model.bin -d {BaseModelDir}/safety\n"
],
"metadata": {
"id": "f86SbtfCUgF3"
},
"execution_count": null,
"outputs": []
},
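{
"cell_type": "code",
"source": [
"#@title optional: verify the stable-diffusion-v1-5 downloads\n",
"# A minimal check, assuming the directory layout created above: every\n",
"# expected component file should exist and be non-empty before moving on.\n",
"import os\n",
"base = \"/content/Moore-AnimateAnyone/pretrainedweights/stable-diffusion-v1-5\"\n",
"expected = [\n",
"    \"model_index.json\",\n",
"    \"vae/diffusion_pytorch_model.bin\", \"vae/config.json\",\n",
"    \"unet/diffusion_pytorch_model.bin\", \"unet/config.json\",\n",
"    \"tokenizer/vocab.json\", \"tokenizer/tokenizer_config.json\",\n",
"    \"tokenizer/special_tokens_map.json\", \"tokenizer/merges.txt\",\n",
"    \"text_encoder/pytorch_model.bin\", \"text_encoder/config.json\",\n",
"    \"scheduler/scheduler_config.json\",\n",
"    \"safety_checker/pytorch_model.bin\",\n",
"]\n",
"for rel in expected:\n",
"    path = os.path.join(base, rel)\n",
"    ok = os.path.isfile(path) and os.path.getsize(path) > 0\n",
"    print(\"OK     \" if ok else \"MISSING\", rel)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},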
{
"cell_type": "code",
"source": [
"%cd /content/Moore-AnimateAnyone/pretrainedweights\n",
"!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/denoising_unet.pth\n",
"!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/motion_module.pth\n",
"!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/pose_guider.pth\n",
"!wget https://huggingface.co/patrolli/AnimateAnyone/resolve/main/reference_unet.pth\n",
"\n",
"%cd /content/Moore-AnimateAnyone/pretrainedweights/image_encoder\n",
"!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/pytorch_model.bin\n",
"!wget https://huggingface.co/lambdalabs/sd-image-variations-diffusers/resolve/main/image_encoder/config.json\n",
"\n",
"%cd /content/Moore-AnimateAnyone/pretrainedweights/DWpose\n",
"!wget https://huggingface.co/yzd-v/DWPose/resolve/main/yolox_l.onnx #yolox\n",
"!wget https://huggingface.co/yzd-v/DWPose/resolve/main/dw-ll_ucoco_384.onnx #dwpose"
],
"metadata": {
"id": "-ESNzmpIWHyf"
},
"execution_count": null,
"outputs": []
},
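{
"cell_type": "code",
"source": [
"#@title optional: verify the AnimateAnyone, image encoder and DWpose weights\n",
"# Another quick check, assuming the paths used above: list each downloaded\n",
"# file with its size so a truncated download is easy to spot.\n",
"!ls -lh /content/Moore-AnimateAnyone/pretrainedweights/*.pth\n",
"!ls -lh /content/Moore-AnimateAnyone/pretrainedweights/image_encoder\n",
"!ls -lh /content/Moore-AnimateAnyone/pretrainedweights/DWpose"
],
"metadata": {},
"execution_count": null,
"outputs": []
},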
{
"cell_type": "code",
"source": [
"#@title upload pose videos\n",
"\n",
"%cd /content/Moore-AnimateAnyone/configs/inference/pose_videos\n",
"from google.colab import files\n",
"uploaded = files.upload()"
],
"metadata": {
"id": "I9GegIxu8Gnl"
},
"execution_count": null,
"outputs": []
},
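{
"cell_type": "code",
"source": [
"#@title optional: inspect an uploaded pose video\n",
"# A helper sketch using OpenCV (preinstalled on Colab): print the frame\n",
"# count, fps and resolution of a pose video so you can pick -L for the\n",
"# inference cell (-L is a frame count: seconds * fps). The filename below\n",
"# is one of the bundled examples; replace it with your own upload.\n",
"import cv2\n",
"video_path = \"/content/Moore-AnimateAnyone/configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
"cap = cv2.VideoCapture(video_path)\n",
"frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n",
"fps = cap.get(cv2.CAP_PROP_FPS)\n",
"w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
"h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
"cap.release()\n",
"print(f\"{frames} frames at {fps:.0f} fps, {w}x{h} ({frames / fps:.1f}s)\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},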
{
"cell_type": "code",
"source": [
"#@title upload ref images\n",
"\n",
"%cd /content/Moore-AnimateAnyone/configs/inference/ref_images\n",
"from google.colab import files\n",
"uploaded = files.upload()"
],
"metadata": {
"id": "hUlDYEL88hhO"
},
"execution_count": null,
"outputs": []
},
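{
"cell_type": "code",
"source": [
"#@title optional: inspect uploaded reference images\n",
"# A small sketch using Pillow (preinstalled on Colab): print each reference\n",
"# image's size, since -W and -H in the inference cell should be divisible by 4.\n",
"import os\n",
"from PIL import Image\n",
"ref_dir = \"/content/Moore-AnimateAnyone/configs/inference/ref_images\"\n",
"for name in sorted(os.listdir(ref_dir)):\n",
"    try:\n",
"        with Image.open(os.path.join(ref_dir, name)) as im:\n",
"            w, h = im.size\n",
"        print(f\"{name}: {w}x{h} (divisible by 4: {w % 4 == 0 and h % 4 == 0})\")\n",
"    except OSError:\n",
"        pass  # skip non-image files"
],
"metadata": {},
"execution_count": null,
"outputs": []
},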
{
"cell_type": "code",
"source": [
"#@title Alter these lines in /content/Moore-AnimateAnyone/configs/prompts/animation.yaml and change to path of uploaded images\n",
"test_cases:\n",
"\n",
" \"./configs/inference/ref_images/anyone-5.png\":\n",
" - \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\""
],
"metadata": {
"id": "mDmcBJDp8sRO"
},
"execution_count": null,
"outputs": []
},
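{
"cell_type": "code",
"source": [
"#@title optional: patch animation.yaml programmatically\n",
"# A sketch using PyYAML instead of editing the file by hand. The two paths\n",
"# below are the bundled examples -- point them at your own uploads from the\n",
"# cells above. Note: rewriting the file this way drops any YAML comments.\n",
"import yaml\n",
"cfg_path = \"/content/Moore-AnimateAnyone/configs/prompts/animation.yaml\"\n",
"ref_image = \"./configs/inference/ref_images/anyone-5.png\"\n",
"pose_video = \"./configs/inference/pose_videos/anyone-video-2_kps.mp4\"\n",
"with open(cfg_path) as f:\n",
"    cfg = yaml.safe_load(f)\n",
"cfg[\"test_cases\"] = {ref_image: [pose_video]}\n",
"with open(cfg_path, \"w\") as f:\n",
"    yaml.safe_dump(cfg, f, sort_keys=False)\n",
"print(yaml.safe_dump({\"test_cases\": cfg[\"test_cases\"]}, sort_keys=False))"
],
"metadata": {},
"execution_count": null,
"outputs": []
},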
{
"cell_type": "code",
"source": [
"#@title command line inference (need to alter yaml) ensure w,h divisible by 4 (L is best the number of seconds divided by pose video fps)\n",
"%cd /content/Moore-AnimateAnyone\n",
"!python -m scripts.pose2vid --config ./configs/prompts/animation.yaml -W 512 -H 784 -L"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "9gs2-Wbr2XwH",
"outputId": "5f638a5f-07cb-41fe-f25b-dd35e1c3b400"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"2024-01-13 22:24:41.326967: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
"2024-01-13 22:24:41.327023: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
"2024-01-13 22:24:41.328479: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
"2024-01-13 22:24:42.467706: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
"Some weights of the model checkpoint were not used when initializing UNet2DConditionModel: \n",
" ['conv_norm_out.weight, conv_norm_out.bias, conv_out.weight, conv_out.bias']\n",
"/usr/local/lib/python3.10/dist-packages/torch/_utils.py:776: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()\n",
" return self.fget.__get__(instance, owner)()\n",
"pose video has 136 frames, with 30 fps\n",
"/content/Moore-AnimateAnyone/src/pipelines/pipeline_pose2vid_long.py:406: FutureWarning: Accessing config attribute `in_channels` directly via 'UNet3DConditionModel' object attribute is deprecated. Please access 'in_channels' over 'UNet3DConditionModel's config object instead, e.g. 'unet.config.in_channels'.\n",
" num_channels_latents = self.denoising_unet.in_channels\n",
"100% 30/30 [20:00<00:00, 40.03s/it]\n",
"100% 64/64 [00:17<00:00, 3.63it/s]\n"
]
}
]
},
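{
"cell_type": "code",
"source": [
"#@title optional: locate the generated video\n",
"# The output filename is timestamped per run, so list the mp4 files under\n",
"# output/ (newest last) to find the path to display or download below.\n",
"import glob, os\n",
"mp4s = sorted(glob.glob(\"/content/Moore-AnimateAnyone/output/**/*.mp4\", recursive=True), key=os.path.getmtime)\n",
"for p in mp4s:\n",
"    print(p)"
],
"metadata": {},
"execution_count": null,
"outputs": []
},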
{
"cell_type": "code",
"source": [
"from IPython.display import HTML\n",
"from base64 import b64encode\n",
"\n",
"# Open the video file and read its contents\n",
"mp4 = open('/content/Moore-AnimateAnyone/output/gradio/20240113T2129.mp4', 'rb').read()\n",
"\n",
"# Encode the video data as a base64 string\n",
"data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
"\n",
"# Display the video using an HTML video element\n",
"HTML(f\"\"\"\n",
"\n",
"\"\"\")"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 221
},
"id": "UjAjpHqCtnfe",
"outputId": "d39c023a-a178-4972-fabd-8a282cb58546"
},
"execution_count": null,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
""
],
"text/html": [
"\n",
"\n"
]
},
"metadata": {},
"execution_count": 2
}
]
},
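{
"cell_type": "code",
"source": [
"#@title optional: download the result\n",
"# Uses Colab's files API to save the mp4 locally; replace the path with the\n",
"# file found by the cell above (the timestamped name here is from one run).\n",
"from google.colab import files\n",
"files.download(\"/content/Moore-AnimateAnyone/output/gradio/20240113T2129.mp4\")"
],
"metadata": {},
"execution_count": null,
"outputs": []
},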
{
"cell_type": "code",
"source": [
"#@title VERY BUGGY BEST USING COMMAND LINE. YOU NEED TO HAVE A POSE VIDEO FIRST, CAN USE THE EXAMPLES. GRADIO WILL CRASH DURING GENERATION BUT SAVE TO OUTPUTS--\n",
"%cd /content/Moore-AnimateAnyone\n",
"!python /content/Moore-AnimateAnyone/app.py"
],
"metadata": {
"id": "DJxzTyuKbIX1"
},
"execution_count": null,
"outputs": []
}
]
}