cc-dsri committed
Commit f0d7b8f · 1 Parent(s): 789798d

Add GPU Dockerfile and requirements

Dockerfile.gpu ADDED
@@ -0,0 +1,39 @@
+FROM nvidia/cuda:12.5.1-cudnn-runtime-ubuntu24.04
+
+ENV PYTHONDONTWRITEBYTECODE="1" \
+    PYTHONUNBUFFERED="1" \
+    DEBIAN_FRONTEND="noninteractive"
+
+# Install system dependencies
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    python3-pip \
+    python-is-python3 && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# hadolint ignore=DL3013
+RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel
+
+WORKDIR /app/
+
+# Download models during build instead of copying from local
+COPY scripts/model_download.bash /tmp/model_download.bash
+RUN python3 -m pip install --no-cache-dir huggingface-hub && \
+    bash /tmp/model_download.bash && \
+    rm /tmp/model_download.bash
+
+# Install CPU requirements
+COPY requirements.cpu.txt ./
+RUN python3 -m pip install --no-cache-dir -r ./requirements.cpu.txt
+
+# Install GPU PyTorch requirements
+COPY requirements.torch.gpu.txt ./
+RUN python3 -m pip install --no-cache-dir -r ./requirements.torch.gpu.txt
+
+COPY app ./app
+COPY main.py ./
+
+EXPOSE 8000
+
+ENTRYPOINT ["python3", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
challenge-cli.py CHANGED
@@ -220,7 +220,7 @@ def upload_submission(
     accelerator = Accelerator(
         kind="GPU",
         gpu=AcceleratorGPU(
-            hardwareTypes=["nvidia-l4"],
+            hardwareTypes=["nvidia.com/gpu-l4"],
            count=1,
         ),
     )
makefile CHANGED
@@ -32,6 +32,14 @@ compile:
 	grep -e '^torch' requirements.torch.cpu.txt.tmp >> requirements.torch.cpu.txt
 	uv pip compile --python-version 3.12 --upgrade -o requirements.cpu.txt requirements.cpu.in
 
+.PHONY: compile-gpu
+compile-gpu:
+	uv pip compile --python-version 3.12 --upgrade -o requirements.torch.gpu.txt requirements.torch.gpu.in
+
+.PHONY: docker-build-gpu
+docker-build-gpu:
+	docker build -t $(IMAGE)-gpu -f Dockerfile.gpu .
+
 requirements.cpu.txt: requirements.in requirements.torch.cpu.txt | $(VENV)
 	uv pip compile --python-version 3.12 --upgrade -o requirements.cpu.txt requirements.cpu.in
 
 
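Usage sketch for the new targets (assuming uv and docker are on PATH and $(IMAGE) is defined elsewhere in the makefile):

    make compile-gpu        # re-pins requirements.torch.gpu.txt from requirements.torch.gpu.in
    make docker-build-gpu   # builds the GPU image from Dockerfile.gpu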
requirements.torch.gpu.in ADDED
@@ -0,0 +1,2 @@
+torch
+torchvision
requirements.torch.gpu.txt ADDED
@@ -0,0 +1,71 @@
+# This file was autogenerated by uv via the following command:
+#    uv pip compile --python-version 3.12 -o requirements.torch.gpu.txt requirements.torch.gpu.in
+filelock==3.20.0
+    # via torch
+fsspec==2025.10.0
+    # via torch
+jinja2==3.1.6
+    # via torch
+markupsafe==3.0.3
+    # via jinja2
+mpmath==1.3.0
+    # via sympy
+networkx==3.6
+    # via torch
+numpy==2.3.5
+    # via torchvision
+nvidia-cublas-cu12==12.8.4.1
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.8.90
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.8.93
+    # via torch
+nvidia-cuda-runtime-cu12==12.8.90
+    # via torch
+nvidia-cudnn-cu12==9.10.2.21
+    # via torch
+nvidia-cufft-cu12==11.3.3.83
+    # via torch
+nvidia-cufile-cu12==1.13.1.3
+    # via torch
+nvidia-curand-cu12==10.3.9.90
+    # via torch
+nvidia-cusolver-cu12==11.7.3.90
+    # via torch
+nvidia-cusparse-cu12==12.5.8.93
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cusparselt-cu12==0.7.1
+    # via torch
+nvidia-nccl-cu12==2.27.5
+    # via torch
+nvidia-nvjitlink-cu12==12.8.93
+    # via
+    #   nvidia-cufft-cu12
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+    #   torch
+nvidia-nvshmem-cu12==3.3.20
+    # via torch
+nvidia-nvtx-cu12==12.8.90
+    # via torch
+pillow==12.0.0
+    # via torchvision
+setuptools==80.9.0
+    # via torch
+sympy==1.14.0
+    # via torch
+torch==2.9.1
+    # via
+    #   -r requirements.torch.gpu.in
+    #   torchvision
+torchvision==0.24.1
+    # via -r requirements.torch.gpu.in
+triton==3.5.1
+    # via torch
+typing-extensions==4.15.0
+    # via torch
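A quick smoke test for the pinned torch/torchvision GPU wheels inside the built image (a sketch: the challenge-app-gpu tag is illustrative, the entrypoint is overridden for a one-off command, and a CUDA-capable host with the NVIDIA Container Toolkit is assumed):

    docker run --rm --gpus all --entrypoint python3 challenge-app-gpu \
      -c "import torch, torchvision; print(torch.__version__, torchvision.__version__, torch.cuda.is_available())"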