Spaces:
Build error
Build error
update
Browse files- Dockerfile +22 -0
- Modelfile +13 -0
- README.md +10 -2
Dockerfile
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1
# Ollama server Space: bakes the gingdev/llama7b-ictu-v2 GGUF model into the
# image so the container serves it immediately on port 11434.
# NOTE(review): prefer pinning to a specific ollama release (ideally with a
# digest) instead of :latest for reproducible builds — confirm a known-good tag.
FROM ollama/ollama:latest

# curl is needed below to fetch the model weights; clean apt lists in the
# same layer so the cache never reaches a final image layer.
RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
    && rm -rf /var/lib/apt/lists/*

# https://huggingface.co/docs/hub/spaces-sdks-docker-first-demo
# Hugging Face Spaces run the container as UID 1000; create a matching
# non-root user so file ownership works at runtime.
RUN useradd -m -u 1000 user

USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    OLLAMA_HOST=0.0.0.0

WORKDIR $HOME/app

COPY --chown=user:user Modelfile $HOME/app/

# Quote the URL: '?' is a shell glob character in shell-form RUN.
RUN curl -fsSL "https://huggingface.co/gingdev/llama7b-ictu-v2/resolve/main/llama7b_q4_k_m.gguf?download=true" -o llama.gguf

# Register the model at build time: start a temporary server in the
# background, poll its API until it answers (instead of a fixed, racy
# `sleep 5`), then create the model from the Modelfile.
RUN ollama serve & \
    for i in $(seq 1 30); do \
        curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1 && break; \
        sleep 1; \
    done \
    && ollama create llama -f Modelfile

# Documentation only — Spaces publishes the port declared in README app_port.
# The base image's ENTRYPOINT/CMD (`ollama serve`) is inherited unchanged.
EXPOSE 11434
|
Modelfile
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Base weights: the quantized GGUF file downloaded by the Dockerfile build.
FROM ./llama.gguf

# System prompt (Vietnamese): "You are a helpful AI assistant. Answer the
# user accurately."
SYSTEM """Bạn là một trợ lí AI hữu ích. Hãy trả lời người dùng một cách chính xác."""

# ChatML-style prompt template: optional system turn, optional user turn,
# then an open assistant turn for the model to complete. The <|im_start|> /
# <|im_end|> markers delimit each role's message.
TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
"""

# Stop generation at the ChatML delimiters so the model cannot run past the
# end of its own turn or fabricate a new one.
PARAMETER stop <|im_start|>
PARAMETER stop <|im_end|>
|
README.md
CHANGED
|
@@ -1,2 +1,10 @@
|
|
| 1 |
-
|
| 2 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Ollama
|
| 3 |
+
emoji: 😍
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 11434
|
| 8 |
+
pinned: true
|
| 9 |
+
---
|
| 10 |
+
# Ollama Server — llama7b-ictu-v2
|