oceansweep committed on
Commit
0dcbd33
1 Parent(s): 4f8b3dc

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +89 -89
Dockerfile CHANGED
@@ -1,89 +1,89 @@
1
- # This is the same dockerfile from `Helper_Files/Dockerfiles/tldw-nvidia_amd64_Dockerfile`. c/p here so people see a 'Dockerfile' in the root directory and know what to do.
2
- # Usage
3
- # docker build -t tldw-nvidia_amd64 .
4
- # docker run --gpus=all -p 7860:7860 -v tldw_volume:/tldw tldw-nvidia_amd64
5
- #
6
- # If the above command doesn't work and it hangs on start, use the following command:
7
- #
8
- # sudo docker run -it -p 7860:7860 -v tldw_volume:/tldw tldw-nvidia_amd64 bash
9
- #
10
- # Once in the container, run the following command:
11
- #
12
- # python summarize.py -gui
13
- #
14
- # And you should be good.
15
-
16
- # Use Nvidia image:
17
- FROM nvidia/cuda:12.6.1-cudnn-runtime-ubuntu24.04
18
-
19
- # Use an official Python runtime as a parent image
20
- #FROM python:3.10.15-slim-bookworm
21
-
22
-
23
- # Set build arguments
24
- ARG REPO_URL=https://github.com/rmusser01/tldw.git
25
- ARG BRANCH=main
26
- ARG GPU_SUPPORT=cpu
27
-
28
- # Install system dependencies
29
- RUN apt-get update && apt-get install -y \
30
- ffmpeg \
31
- libsqlite3-dev \
32
- build-essential \
33
- git \
34
- python3 \
35
- python3-pyaudio \
36
- portaudio19-dev \
37
- python3-pip \
38
- portaudio19-dev \
39
- python3-venv \
40
- && rm -rf /var/lib/apt/lists/*
41
-
42
- # Set the working directory in the container
43
- WORKDIR /tldw
44
-
45
- # Clone the repository
46
- RUN git clone -b ${BRANCH} ${REPO_URL} .
47
-
48
- # Create and activate virtual environment
49
- RUN python3 -m venv ./venv
50
- ENV PATH="/tldw/venv/bin:$PATH"
51
-
52
- # Upgrade pip and install wheel
53
- RUN pip install --upgrade pip wheel
54
-
55
- # Install CUDA
56
- RUN pip install nvidia-cublas-cu12 nvidia-cudnn-cu12
57
-
58
- # setup PATH
59
- RUN export LD_LIBRARY_PATH=`python3 -c 'import os; import nvidia.cublas.lib; import nvidia.cudnn.lib; print(os.path.dirname(nvidia.cublas.lib.__file__) + ":" + os.path.dirname(nvidia.cudnn.lib.__file__))'`
60
-
61
-
62
- # Install PyTorch based on GPU support
63
- RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
64
- pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu123; \
65
- elif [ "$GPU_SUPPORT" = "amd" ]; then \
66
- pip install torch-directml; \
67
- else \
68
- pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu; \
69
- fi
70
-
71
- # Install other requirements
72
- RUN pip install -r requirements.txt
73
-
74
- # Update config.txt for CPU if needed
75
- RUN if [ "$GPU_SUPPORT" = "cpu" ]; then \
76
- sed -i 's/cuda/cpu/' ./Config_Files/config.txt; \
77
- fi
78
-
79
- # Create a volume for persistent storage
80
- VOLUME /tldw
81
-
82
- # Make port 7860 available to the world outside this container
83
- EXPOSE 7860
84
-
85
- # Set listening to all interfaces
86
- ENV GRADIO_SERVER_NAME="0.0.0.0"
87
-
88
- # Run the application
89
- CMD ["python", "summarize.py", "-gui"]
 
1
+ # This is the same dockerfile from `Helper_Files/Dockerfiles/tldw-nvidia_amd64_Dockerfile`. c/p here so people see a 'Dockerfile' in the root directory and know what to do.
2
+ # Usage
3
+ # docker build -t tldw-nvidia_amd64 .
4
+ # docker run --gpus=all -p 7860:7860 -v tldw_volume:/tldw tldw-nvidia_amd64
5
+ #
6
+ # If the above command doesn't work and it hangs on start, use the following command:
7
+ #
8
+ # sudo docker run -it -p 7860:7860 -v tldw_volume:/tdlw tldw-nvidia_amd64 bash
9
+ #
10
+ # Once in the container, run the following command:
11
+ #
12
+ # python summarize.py -gui
13
+ #
14
+ # And you should be good.
15
+
16
+ # Use Nvidia image:
17
+ FROM nvidia/cuda:12.6.1-cudnn-runtime-ubuntu24.04
18
+
19
+ # Use an official Python runtime as a parent image
20
+ #FROM python:3.10.15-slim-bookworm
21
+
22
+
23
+ # Set build arguments
24
+ ARG REPO_URL=https://github.com/rmusser01/tldw.git
25
+ ARG BRANCH=main
26
+ ARG GPU_SUPPORT=cpu
27
+
28
+ # Install system dependencies
29
+ RUN apt-get update && apt-get install -y \
30
+ ffmpeg \
31
+ libsqlite3-dev \
32
+ build-essential \
33
+ git \
34
+ python3 \
35
+ python3-pyaudio \
36
+ portaudio19-dev \
37
+ python3-pip \
38
+ portaudio19-dev \
39
+ python3-venv \
40
+ && rm -rf /var/lib/apt/lists/*
41
+
42
+ # Set the working directory in the container
43
+ WORKDIR /tldw
44
+
45
+ # Clone the repository
46
+ RUN git clone -b ${BRANCH} ${REPO_URL} .
47
+
48
+ # Create and activate virtual environment
49
+ RUN python3 -m venv ./venv
50
+ ENV PATH="/tldw/venv/bin:$PATH"
51
+
52
+ # Upgrade pip and install wheel
53
+ RUN pip install --upgrade pip wheel
54
+
55
+ # Install CUDA
56
+ RUN pip install nvidia-cublas-cu12 nvidia-cudnn-cu12
57
+
58
+ # setup PATH
59
+ RUN export LD_LIBRARY_PATH=`python3 -c 'import os; import nvidia.cublas.lib; import nvidia.cudnn.lib; print(os.path.dirname(nvidia.cublas.lib.__file__) + ":" + os.path.dirname(nvidia.cudnn.lib.__file__))'`
60
+
61
+
62
+ # Install PyTorch based on GPU support
63
+ RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
64
+ pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu123; \
65
+ elif [ "$GPU_SUPPORT" = "amd" ]; then \
66
+ pip install torch-directml; \
67
+ else \
68
+ pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu; \
69
+ fi
70
+
71
+ # Install other requirements
72
+ RUN pip install -r requirements.txt
73
+
74
+ # Update config.txt for CPU if needed
75
+ RUN if [ "$GPU_SUPPORT" = "cpu" ]; then \
76
+ sed -i 's/cuda/cpu/' ./Config_Files/config.txt; \
77
+ fi
78
+
79
+ # Create a volume for persistent storage
80
+ VOLUME /tldw
81
+
82
+ # Make port 7860 available to the world outside this container
83
+ EXPOSE 7860
84
+
85
+ # Set listening to all interfaces
86
+ ENV GRADIO_SERVER_NAME="0.0.0.0"
87
+
88
+ # Run the application
89
+ CMD ["python", "summarize.py", "-gui", "-log DEBUG"]