Hameed13 committed on
Commit
fe3cab4
·
verified ·
1 Parent(s): 489316d

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +43 -57
Dockerfile CHANGED
@@ -4,11 +4,10 @@ FROM python:3.10-slim
4
  # Set working directory
5
  WORKDIR /app
6
 
7
- # Set environment variables for better performance
 
 
8
  ENV PYTHONUNBUFFERED=1
9
- ENV HF_HOME=/tmp/huggingface
10
- ENV TORCH_HOME=/tmp/torch
11
- ENV PIP_NO_CACHE_DIR=1
12
 
13
  # Install system dependencies
14
  RUN apt-get update && apt-get install -y \
@@ -17,68 +16,55 @@ RUN apt-get update && apt-get install -y \
17
  curl \
18
  build-essential \
19
  ffmpeg \
20
- libsndfile1 \
21
  && rm -rf /var/lib/apt/lists/*
22
 
23
- # Upgrade pip
24
- RUN pip install --upgrade pip
25
-
26
- # Install torch first with CPU-only version (much smaller and faster)
27
- RUN pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu
28
-
29
- # Copy and install requirements
30
  COPY requirements.txt .
31
- RUN pip install --no-cache-dir -r requirements.txt
32
-
33
- # Copy yarngpt source code
34
- COPY yarngpt/ /app/yarngpt/
35
-
36
- # Add yarngpt to Python path and install its dependencies
37
- ENV PYTHONPATH="${PYTHONPATH}:/app/yarngpt"
38
-
39
- # Try to install yarngpt package from PyPI first as fallback
40
- RUN pip install yarngpt || echo "PyPI install failed, using local copy"
41
 
42
- # Copy application files
43
- COPY . .
44
-
45
- # Create directories
46
- RUN mkdir -p audio_files models
47
 
48
- # Make scripts executable
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  RUN chmod +x start.sh
50
 
51
- # Download model files (these are smaller and should download faster)
52
- RUN python -c "
53
- import os
54
- import requests
55
- from pathlib import Path
56
-
57
- # Create models directory
58
- os.makedirs('models', exist_ok=True)
59
-
60
- # Download WavTokenizer model files
61
- files_to_download = [
62
- ('https://huggingface.co/novateur/WavTokenizer-medium-speech-75token/resolve/main/wavtokenizer_mediumdata_frame75_3s_nq1_code4096_dim512_kmeans200_attn.yaml', 'models/wavtokenizer_mediumdata_frame75_3s_nq1_code4096_dim512_kmeans200_attn.yaml'),
63
- ('https://huggingface.co/novateur/WavTokenizer-medium-speech-75token/resolve/main/wavtokenizer_mediumdata_frame75_3s_nq1_code4096_dim512_kmeans200_attn.ckpt', 'models/wavtokenizer_mediumdata_frame75_3s_nq1_code4096_dim512_kmeans200_attn.ckpt')
64
- ]
65
-
66
- for url, filepath in files_to_download:
67
- print(f'Downloading {filepath}...')
68
- try:
69
- response = requests.get(url, stream=True, timeout=300)
70
- response.raise_for_status()
71
- with open(filepath, 'wb') as f:
72
- for chunk in response.iter_content(chunk_size=8192):
73
- f.write(chunk)
74
- print(f'Successfully downloaded {filepath}')
75
- except Exception as e:
76
- print(f'Failed to download {filepath}: {e}')
77
- # Continue with other downloads
78
- "
79
 
80
  # Expose port
81
  EXPOSE 7860
82
 
83
- # Use the startup script
84
  CMD ["./start.sh"]
 
4
  # Set working directory
5
  WORKDIR /app
6
 
7
+ # Set environment variables
8
+ ENV PYTHONPATH="${PYTHONPATH}:/app:/app/yarngpt"
9
+ ENV HF_HOME="/tmp/huggingface"
10
  ENV PYTHONUNBUFFERED=1
 
 
 
11
 
12
  # Install system dependencies
13
  RUN apt-get update && apt-get install -y \
 
16
  curl \
17
  build-essential \
18
  ffmpeg \
19
+ && apt-get clean \
20
  && rm -rf /var/lib/apt/lists/*
21
 
22
+ # Copy requirements first for better caching
 
 
 
 
 
 
23
  COPY requirements.txt .
 
 
 
 
 
 
 
 
 
 
24
 
25
+ # Install Python dependencies
26
+ RUN pip install --no-cache-dir -r requirements.txt
 
 
 
27
 
28
+ # Copy yarngpt folder and other files
29
+ COPY yarngpt/ ./yarngpt/
30
+ COPY main.py .
31
+ COPY start.sh .
32
+
33
+ # Try to install yarngpt from PyPI as backup
34
+ RUN pip install yarngpt || echo "PyPI installation failed, using local copy"
35
+
36
+ # Create model download script
37
+ RUN echo 'import os\n\
38
+ import requests\n\
39
+ from pathlib import Path\n\
40
+ \n\
41
+ def download_file(url, filepath):\n\
42
+ print(f"Downloading {filepath}...")\n\
43
+ response = requests.get(url, stream=True)\n\
44
+ response.raise_for_status()\n\
45
+ with open(filepath, "wb") as f:\n\
46
+ for chunk in response.iter_content(chunk_size=8192):\n\
47
+ f.write(chunk)\n\
48
+ print(f"Downloaded {filepath}")\n\
49
+ \n\
50
+ # Create directories\n\
51
+ Path("/tmp/huggingface/hub").mkdir(parents=True, exist_ok=True)\n\
52
+ \n\
53
+ print("Model files will be downloaded during startup...")\n\
54
+ print("Setup complete!")' > download_models.py
55
+
56
+ # Run the download script
57
+ RUN python download_models.py
58
+
59
+ # Make start script executable
60
  RUN chmod +x start.sh
61
 
62
+ # Health check
63
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
64
+ CMD curl -f http://localhost:7860/health || exit 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  # Expose port
67
  EXPOSE 7860
68
 
69
+ # Start the application
70
  CMD ["./start.sh"]