# Choose an appropriate base image with CUDA support
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04

# Set the working directory in the container
WORKDIR /app

# Install Python build dependencies, pip, and other necessary system packages
RUN apt-get update && apt-get install -y \
    git git-lfs libgl1 libglib2.0-0 \
    make build-essential libssl-dev zlib1g-dev \
    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev \
    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/* \
    && git lfs install
# Note: the NVIDIA Container Runtime / Container Toolkit is set up on the host, not inside the image.

# Create a non-root user and give it ownership of /app so it can clone into it later
RUN useradd -m -u 1000 user && chown -R user:user /app
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Pyenv
RUN curl https://pyenv.run | bash
ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH

ARG PYTHON_VERSION=3.10.12

# Python
RUN pyenv install $PYTHON_VERSION && \
    pyenv global $PYTHON_VERSION && \
    pyenv rehash && \
    pip install --no-cache-dir --upgrade pip setuptools wheel && \
    pip install --no-cache-dir \
        datasets \
        huggingface-hub "protobuf<4" "click<8.1"

# Install PyTorch with CUDA support matching the CUDA version of the base image
RUN pip3 install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121

# Install flash attention and apex (if needed)
RUN pip3 install packaging ninja
RUN pip3 install flash-attn --no-build-isolation
RUN pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
    --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
    git+https://github.com/NVIDIA/apex.git

# Install xformers built against the same CUDA version
RUN pip3 install -U xformers --index-url https://download.pytorch.org/whl/cu121

# Clone and install your project
RUN git clone https://github.com/hpcaitech/Open-Sora /app/Open-Sora
WORKDIR /app/Open-Sora
RUN pip3 install -v .

# Expose the port used by Gradio
EXPOSE 7860

# Set the command to run your application (adjust the path if your Gradio entrypoint lives elsewhere)
CMD ["python3", "/app/app.py"]
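
# Build-and-run sketch (assumptions: this file is saved as "Dockerfile" in the
# current directory, the host has the NVIDIA Container Toolkit installed, and
# "open-sora-app" is an arbitrary example image tag):
#
#   docker build -t open-sora-app .
#   docker run --gpus all -p 7860:7860 open-sora-app
#
# "--gpus all" exposes the host GPUs to the container; "-p 7860:7860" publishes
# the Gradio port declared by EXPOSE above.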