# syntax=docker/dockerfile:1
# AniPortrait-Vid2Vid — Dockerfile
# Source: fffiloni's Hugging Face Space ("Update Dockerfile", commit 116ca94, verified)
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime

# Keep apt non-interactive during the build only. ARG (not ENV) so the
# setting does not leak into the runtime environment of the final image.
ARG DEBIAN_FRONTEND=noninteractive

# Force the GNU threading layer so MKL does not clash with libgomp pulled
# in by PyTorch; this must persist at runtime, hence ENV.
ENV MKL_THREADING_LAYER=GNU

# Install all OS-level dependencies in a single layer:
#   aria2                      parallel download of model checkpoints
#   build-essential/cmake/g++  compile native extensions
#   espeak-ng                  phonemizer backend
#   ffmpeg                     audio/video processing
#   git                        clone the application repository
#   libgl1-mesa-glx, libglib2.0-0  OpenCV runtime libraries
#   ninja-build                fast builds for CUDA/C++ extensions
#   unzip                      archive extraction
# `update` and `install` are combined in one RUN (avoids the stale-apt-cache
# bug of a standalone `apt-get update` layer), recommends are skipped, and
# the package lists are removed in the same layer so they never bloat a layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
        aria2 \
        build-essential \
        cmake \
        espeak-ng \
        ffmpeg \
        g++ \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        ninja-build \
        unzip \
    && rm -rf /var/lib/apt/lists/*
# Create an unprivileged account "user" (UID 1000, as expected by HF Spaces)
# and drop root for the remainder of the build and at container runtime.
RUN useradd --create-home --uid 1000 user
USER user
# HOME and CUDA_HOME must be defined in their own ENV instruction first:
# Docker substitutes variables in an ENV line using the values that existed
# *before* that instruction, so referencing $HOME or ${CUDA_HOME} later in
# the same ENV block would expand them to empty strings (leaving e.g.
# PYTHONPATH=/app instead of /home/user/app).
ENV HOME=/home/user \
    CUDA_HOME=/usr/local/cuda

# Runtime configuration for Python, the CUDA toolchain, and the Gradio UI.
ENV PATH=/home/user/.local/bin:$PATH \
    LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH} \
    LIBRARY_PATH=${CUDA_HOME}/lib64/stubs:${LIBRARY_PATH} \
    PYTHONPATH=$HOME/app \
    PYTHONUNBUFFERED=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    GRADIO_SHARE=False \
    SYSTEM=spaces

# Work out of the app directory; WORKDIR creates it if it does not exist.
WORKDIR $HOME/app
# Clone the application code into the (empty) workdir. --depth 1 fetches
# only the tip of `main` — same checkout, much smaller layer. NOTE: an
# unpinned branch clone is not reproducible; pin a commit for stable builds.
RUN git clone -b main --depth 1 https://github.com/fffiloni/AniPortrait $HOME/app
# Install Python dependencies from the cloned repo's requirements.txt.
# --no-cache-dir keeps pip's download cache out of the image layer.
#COPY requirements.txt $HOME/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
# Download model checkpoints with aria2 (-c resume, 16 connections/splits,
# 1 MiB chunks). One RUN per file keeps each download independently cached.
# Layout produced under $HOME/app/pretrained_model/:
#   flat *.pt/*.pth weights, plus image_encoder/, sd-vae-ft-mse/,
#   stable-diffusion-v1-5/, and wav2vec2-base-960h/ subtrees.
# AniPortrait weights:
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/AniPortrait/resolve/main/audio2mesh.pt -d $HOME/app/pretrained_model -o audio2mesh.pt
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/AniPortrait/resolve/main/denoising_unet.pth -d $HOME/app/pretrained_model -o denoising_unet.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/AniPortrait/resolve/main/motion_module.pth -d $HOME/app/pretrained_model -o motion_module.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/AniPortrait/resolve/main/pose_guider.pth -d $HOME/app/pretrained_model -o pose_guider.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/AniPortrait/resolve/main/reference_unet.pth -d $HOME/app/pretrained_model -o reference_unet.pth
# champ weights:
# NOTE(review): this writes denoising_unet.pth to the SAME path as the
# AniPortrait download above — with `aria2c -c` it may be skipped or conflict
# rather than replace it. Confirm which model is intended to win.
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/denoising_unet.pth -d $HOME/app/pretrained_model -o denoising_unet.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/guidance_encoder_depth.pth -d $HOME/app/pretrained_model -o guidance_encoder_depth.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/guidance_encoder_dwpose.pth -d $HOME/app/pretrained_model -o guidance_encoder_dwpose.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/guidance_encoder_normal.pth -d $HOME/app/pretrained_model -o guidance_encoder_normal.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/guidance_encoder_semantic_map.pth -d $HOME/app/pretrained_model -o guidance_encoder_semantic_map.pth
# NOTE(review): motion_module.pth — same duplicate-destination concern as
# denoising_unet.pth above (collides with the AniPortrait file).
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/motion_module.pth -d $HOME/app/pretrained_model -o motion_module.pth
# NOTE(review): reference_unet.pth — same duplicate-destination concern
# (collides with the AniPortrait file).
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/champ/reference_unet.pth -d $HOME/app/pretrained_model -o reference_unet.pth
# CLIP image encoder (configs fetched via /raw/, weights via /resolve/):
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/image_encoder/config.json -d $HOME/app/pretrained_model/image_encoder -o config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/image_encoder/pytorch_model.bin -d $HOME/app/pretrained_model/image_encoder -o pytorch_model.bin
# VAE (sd-vae-ft-mse):
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/sd-vae-ft-mse/config.json -d $HOME/app/pretrained_model/sd-vae-ft-mse -o config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/sd-vae-ft-mse/diffusion_pytorch_model.bin -d $HOME/app/pretrained_model/sd-vae-ft-mse -o diffusion_pytorch_model.bin
# Stable Diffusion v1.5 (feature extractor, pipeline index, UNet, yaml):
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/stable-diffusion-v1-5/feature_extractor/preprocessor_config.json -d $HOME/app/pretrained_model/stable-diffusion-v1-5/feature_extractor -o preprocessor_config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/stable-diffusion-v1-5/model_index.json -d $HOME/app/pretrained_model/stable-diffusion-v1-5 -o model_index.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/stable-diffusion-v1-5/unet/config.json -d $HOME/app/pretrained_model/stable-diffusion-v1-5/unet -o config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/resolve/main/stable-diffusion-v1-5/unet/diffusion_pytorch_model.bin -d $HOME/app/pretrained_model/stable-diffusion-v1-5/unet -o diffusion_pytorch_model.bin
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/camenduru/champ/raw/main/stable-diffusion-v1-5/v1-inference.yaml -d $HOME/app/pretrained_model/stable-diffusion-v1-5 -o v1-inference.yaml
# wav2vec2 audio encoder (configs, tokenizer assets, PyTorch + TF weights):
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/config.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/feature_extractor_config.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o feature_extractor_config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/preprocessor_config.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o preprocessor_config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/pytorch_model.bin -d $HOME/app/pretrained_model/wav2vec2-base-960h -o pytorch_model.bin
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/special_tokens_map.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o special_tokens_map.json
# NOTE(review): tf_model.h5 is TensorFlow weights in a PyTorch image —
# presumably unused at runtime; confirm and drop to save ~1 GB if so.
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tf_model.h5 -d $HOME/app/pretrained_model/wav2vec2-base-960h -o tf_model.h5
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/tokenizer_config.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o tokenizer_config.json
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/facebook/wav2vec2-base-960h/raw/main/vocab.json -d $HOME/app/pretrained_model/wav2vec2-base-960h -o vocab.json
# Pin GPU enumeration to PCI bus order and restrict the app to GPU 0.
ENV CUDA_DEVICE_ORDER=PCI_BUS_ID \
    CUDA_VISIBLE_DEVICES=0

# Copy example assets and the entrypoint script. COPY creates the target
# directory itself, so no separate `RUN mkdir` layer is needed; --chown
# makes the files owned by the unprivileged "user" account (a plain COPY
# after `USER user` would still produce root-owned files).
COPY --chown=user:user examples examples
COPY --chown=user:user app_hf.py .

# Launch the Gradio app. Exec (JSON-array) form keeps python as PID 1 so it
# receives SIGTERM directly on `docker stop`.
CMD ["python", "app_hf.py"]