# Docker Compose stack for the Lily LLM platform:
# GPU-backed API server, a CPU-based LaTeX-OCR worker, and the Hearth Chat
# front-end, all joined on a shared bridge network.
version: '3.8'

services:
  # Main LLM API (GPU-backed; requires the NVIDIA container runtime)
  lily-llm-api-gpu:
    build:
      context: .
      dockerfile: Dockerfile.gpu
    container_name: lily-llm-api-gpu
    ports:
      - "8001:8001"
    volumes:
      - ./uploads:/app/uploads
      - ./vector_stores:/app/vector_stores
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
      - ./lily_llm_media:/app/lily_llm_media
      - ./hearth_llm_model:/app/hearth_llm_model
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - PYTHONPATH=/app
      - LILY_LLM_ENV=production
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU for this service.
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    restart: unless-stopped
    networks:
      - lily-network

  # LaTeX-OCR dedicated container (CPU-based)
  latex-ocr-service:
    build:
      context: .
      dockerfile: Dockerfile.latex-ocr
    container_name: latex-ocr-service
    volumes:
      # Shares the uploads and FAISS-store directories with the API service.
      - ./uploads:/app/uploads
      - ./latex_ocr_faiss_stores:/app/latex_ocr_faiss_stores
    environment:
      - PYTHONPATH=/app
    restart: unless-stopped
    networks:
      - lily-network

  # Hearth Chat service (separate container)
  hearth-chat:
    image: node:18-alpine
    container_name: hearth-chat
    working_dir: /app
    volumes:
      # Bind-mounts the sibling package directory as the app root.
      - ../hearth_chat_package:/app
    ports:
      - "8000:8000"
    command: ["npm", "start"]
    restart: unless-stopped
    networks:
      - lily-network

networks:
  lily-network:
    driver: bridge

volumes:
  # NOTE(review): declared but not mounted by any service in this file —
  # confirm it is used elsewhere (e.g. a compose override) before removing.
  lily-data: