#!/bin/bash

# Bind the Ollama server to all interfaces and allow CORS requests from projects.blender.org
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS=https://projects.blender.org

# Start the Ollama service in the background
ollama serve &

# Wait for the service to initialize
sleep 10
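# A fixed sleep can be flaky on slow hosts; a hedged alternative (assuming
# Ollama's default port 11434) is to poll the server until it responds, e.g.:
#
#   until curl -fsS http://localhost:11434/ > /dev/null; do sleep 1; done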

# Download the quantized model weights (GGUF) from Hugging Face
curl -fsSL "https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF/resolve/main/llama-3.2-1b-instruct-q4_k_m.gguf?download=true" -o llama.gguf

# Create the model using Ollama
ollama create llama3.2 -f Modelfile
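# Note: the Modelfile referenced above is not part of this script. A minimal
# sketch (assumption, not the actual file) that points Ollama at the
# downloaded weights would be:
#
#   FROM ./llama.gguf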

# Keep the container running indefinitely
tail -f /dev/null