# Use an official PyTorch runtime (CUDA 11.8, cuDNN 8) as the parent image
FROM pytorch/pytorch:2.1.0-cuda11.8-cudnn8-runtime
# Set NVIDIA environment variables so the GPU is visible inside the container
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=all
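# Note: these variables only take effect when the container is started with NVIDIA GPU
# access (e.g. via the NVIDIA Container Toolkit); see the example run command at the
# end of this file.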
# FROM python:3.12.3
# Set up a new user named "user" with user ID 1000; essential so the app does not run as root
RUN useradd -m -u 1000 user
# Switch to the "user" user
USER user
# Set HOME to the user's home directory and add the user's local bin
# (where user-level pip installs place executables) to PATH
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
# Set the working directory to the user's home directory
WORKDIR $HOME/app
# Upgrade pip after switching to `USER user` to avoid permission issues with user-level installs
RUN pip install --no-cache-dir --upgrade pip
# Copy the current directory contents into the container at $HOME/app, setting the owner to the user
COPY --chown=user . $HOME/app
# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
# # Download the weights used by the Streamlit app for inference from GitHub
# RUN wget https://github.com/meghakalia/depthEstimationColonoscopy/releases/download/0.0.1/depth.pth
# RUN wget https://github.com/meghakalia/depthEstimationColonoscopy/releases/download/0.0.1/encoder.pth
# RUN wget https://github.com/meghakalia/depthEstimationColonoscopy/releases/download/0.0.1/pose.pth
# RUN wget https://github.com/meghakalia/depthEstimationColonoscopy/releases/download/0.0.1/pose_encoder.pth
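# If these downloads are re-enabled, wget may not be available in this base image;
# Docker's ADD instruction is one alternative (a sketch, shown for one of the files above):
# ADD https://github.com/meghakalia/depthEstimationColonoscopy/releases/download/0.0.1/depth.pth $HOME/app/depth.pth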
# Create the .streamlit directory
RUN mkdir -p .streamlit
# Create the config.toml file and set maxMessageSize so that large data can be displayed in the browser
# RUN echo "\
# [server]\n\
# maxMessageSize = 2000\n\
# " > .streamlit/config.toml
# Important: document that the app listens on port 8501 (Streamlit's default); publish it when running the container
EXPOSE 8501
# Run depth_app.py with Streamlit when the container launches
CMD ["streamlit", "run", "depth_app.py", "--server.enableXsrfProtection=false"]
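# Example usage (not part of the image build; the image name is an arbitrary choice):
#   docker build -t depth-pose-estimation .
#   docker run --gpus all -p 8501:8501 depth-pose-estimation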