#!/bin/bash

# Activate the Python virtual environment
source /app/venv/bin/activate

# Start the Ollama server in the background
echo "Starting Ollama server"
ollama serve &
sleep 1
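
# A short fixed sleep may not be enough on slow hosts. As a minimal readiness
# check (a sketch, assuming the server listens on the default local endpoint),
# poll until the CLI can reach it before pulling models.
until ollama list >/dev/null 2>&1; do
    echo "Waiting for Ollama server to become ready..."
    sleep 1
done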

# If MODEL is set, split its comma-separated value into an array of model
# names; otherwise fall back to a default list of models
if [ -n "${MODEL}" ]; then
  IFS=',' read -ra MODELS <<< "${MODEL}"
else
  MODELS=(phi3 mistral llama3 gemma:2b)
fi
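# Example: MODEL="phi3,mistral" results in MODELS=(phi3 mistral)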


# Pull each requested model, pausing briefly between pulls
for m in "${MODELS[@]}"; do
    echo "Pulling $m"
    ollama pull "$m"
    sleep 5
done
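
# Optional visibility for container logs (sketch: `ollama list` prints the
# models the server has available locally, confirming the pulls succeeded)
echo "Models available locally:"
ollama list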


# Hand off to the Python application; exec replaces this shell process, so
# nothing after this line runs and no explicit wait is needed
exec python ./run.py