# lily_fast_api / docker-compose.gpu.yml
# NOTE(review): the following Hugging Face Spaces page chrome was captured with the
# file and has been converted to comments so the document parses as YAML:
#   uploader: gbrabbit — commit "Fresh start for HF Spaces deployment" (526927a)
#   page controls: raw / history / blame — reported size 1.51 kB
# Compose file for the Lily LLM stack: GPU API server, CPU LaTeX-OCR worker,
# and the Hearth Chat Node frontend, all joined on one bridge network.
# NOTE: the `version` key is obsolete under the Compose Specification (modern
# `docker compose` ignores it); kept for backward compatibility with Compose v1.
version: '3.8'

services:
  # Main LLM API server — requires an NVIDIA GPU (reserved via the Compose
  # device-reservation syntax below; needs the NVIDIA container toolkit on the host).
  lily-llm-api-gpu:
    build:
      context: .
      dockerfile: Dockerfile.gpu
    container_name: lily-llm-api-gpu
    ports:
      - "8001:8001"
    volumes:
      - ./uploads:/app/uploads
      - ./vector_stores:/app/vector_stores
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
      - ./lily_llm_media:/app/lily_llm_media
      - ./hearth_llm_model:/app/hearth_llm_model
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - PYTHONPATH=/app
      - LILY_LLM_ENV=production
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: unless-stopped
    networks:
      - lily-network

  # LaTeX-OCR dedicated container (CPU-based); shares the uploads and FAISS
  # store directories with the API service via host bind mounts.
  latex-ocr-service:
    build:
      context: .
      dockerfile: Dockerfile.latex-ocr
    container_name: latex-ocr-service
    volumes:
      - ./uploads:/app/uploads
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
    environment:
      - PYTHONPATH=/app
    restart: unless-stopped
    networks:
      - lily-network

  # Hearth Chat service (separate container) — runs the sibling package from a
  # bind mount. NOTE(review): `npm start` assumes node_modules already exists in
  # ../hearth_chat_package (no install step here) — confirm against that package.
  hearth-chat:
    image: node:18-alpine
    container_name: hearth-chat
    working_dir: /app
    volumes:
      - ../hearth_chat_package:/app
    ports:
      - "8000:8000"
    command: ["npm", "start"]
    restart: unless-stopped
    networks:
      - lily-network

networks:
  lily-network:
    driver: bridge

# NOTE(review): `lily-data` is declared but not mounted by any service above —
# confirm whether it is referenced elsewhere or can be removed.
volumes:
  lily-data: