#!/bin/bash
# Entrypoint: start an Ollama server, download a GGUF model from Hugging Face,
# and register it with Ollama, then keep the container in the foreground.
#
# Requires: a Modelfile in the working directory referencing llama.gguf.
set -euo pipefail

# Listen on all interfaces; allow browser requests from projects.blender.org.
export OLLAMA_HOST=0.0.0.0
export OLLAMA_ORIGINS=https://projects.blender.org

# Start the Ollama service in the background.
ollama serve &
serve_pid=$!

# Poll for readiness instead of a blind fixed sleep: try for up to 30s.
ready=0
for ((i = 0; i < 30; i++)); do
  if ollama list >/dev/null 2>&1; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  printf 'error: ollama server (pid %s) did not become ready in 30s\n' "$serve_pid" >&2
  exit 1
fi

# Download the model weights. The URL is quoted because '?' is a glob
# character; --retry guards against transient network failures on a large file.
curl -fsSL --retry 3 \
  'https://huggingface.co/hugging-quants/Llama-3.2-1B-Instruct-Q4_K_M-GGUF/resolve/main/llama-3.2-1b-instruct-q4_k_m.gguf?download=true' \
  -o llama.gguf

# Register the model with Ollama (fails loudly under set -e if the
# Modelfile is missing or invalid).
ollama create llama3.2 -f Modelfile

# Keep the container running indefinitely.
tail -f /dev/null