# Ollama-mistral / Dockerfile
# Hugging Face Space by Do0rMaMu (original upload: "Create Dockerfile", rev 3246b2f)
# syntax=docker/dockerfile:1
# Base image: slim variant keeps the image smaller than full python:3.9.
FROM python:3.9-slim

# curl + certs are needed by the Ollama install script; clean the apt lists
# in the same layer so they never persist in the image (hadolint DL3009/DL3015).
RUN apt-get update && apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
    && rm -rf /var/lib/apt/lists/*

# Install the Ollama server binary.
# NOTE(review): curl | sh executes an unpinned remote script — consider
# fetching a pinned release tarball with a checksum for reproducible builds.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Python client library used by the FastAPI app to talk to the local server.
# NOTE(review): unpinned version — pin (e.g. ollama==x.y.z) for reproducibility.
RUN pip install --no-cache-dir ollama

# Use a dedicated app directory. The original file set WORKDIR / but copied
# requirements.txt to /app/, so `pip install -r requirements.txt` could not
# find the file and the build failed.
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Application code: uvicorn expects the FastAPI app object at main:app.
COPY main.py .

# Pre-download the Mistral model at build time so container startup is fast.
# A server launched in one RUN layer does not survive into later layers, so
# the temporary `ollama serve` must live in the SAME RUN as the pull (the
# original split them across layers, leaving `ollama pull` with no server).
RUN ollama serve & \
    sleep 5 && \
    ollama pull mistral

# Documentation only (EXPOSE does not publish ports):
# 11434 = Ollama API (internal), 7860 = the published uvicorn service.
EXPOSE 11434
EXPOSE 7860

# Launch the Ollama server in the background, then `exec` uvicorn so it
# becomes PID 1 and receives SIGTERM from `docker stop`. The original CMD
# started only uvicorn — Ollama was never running at container runtime.
CMD ["/bin/sh", "-c", "ollama serve & exec uvicorn main:app --host 0.0.0.0 --port 7860"]