Upload folder using huggingface_hub
- .gitattributes +2 -0
- Dockerfile +12 -0
- README.md +5 -5
- dolphin-2.0-mistral-7b.Q4_K_S.gguf +3 -0
- main.py +34 -0
- requirements.txt +6 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+zephyr-7b-beta.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+dolphin-2.0-mistral-7b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,12 @@
+FROM python:3.9
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+COPY ./dolphin-2.0-mistral-7b.Q4_K_S.gguf /code/dolphin-2.0-mistral-7b.Q4_K_S.gguf
+COPY ./main.py /code/main.py
+
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
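For local testing outside Docker, the CMD above maps to a one-line uvicorn launch. A minimal sketch, assuming main.py and the GGUF file sit in the working directory:

# Local equivalent of the Dockerfile CMD: serve main:app on port 7860
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=7860)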
README.md
CHANGED
@@ -1,11 +1,11 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: LLM Deployment Zerocost Api
+emoji: 😻
+colorFrom: purple
+colorTo: pink
 sdk: docker
 pinned: false
-license:
+license: mit
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
dolphin-2.0-mistral-7b.Q4_K_S.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be1991132683eb2207a110a41449239c21dcb3d5a108197e74cba2e006de33eb
+size 4140373664
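The file above is only a Git LFS pointer; the roughly 4.1 GB of weights are stored separately, and the oid is the SHA-256 of the resolved file. A sketch for checking a local copy against the pointer, with the hash and size copied from the pointer above:

# Verify a downloaded .gguf against the LFS pointer's sha256 and size
import hashlib
from pathlib import Path

EXPECTED_SHA256 = "be1991132683eb2207a110a41449239c21dcb3d5a108197e74cba2e006de33eb"
EXPECTED_SIZE = 4140373664

path = Path("dolphin-2.0-mistral-7b.Q4_K_S.gguf")
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("checksum OK")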
main.py
ADDED
@@ -0,0 +1,34 @@
+from ctransformers import AutoModelForCausalLM
+from fastapi import FastAPI
+from pydantic import BaseModel
+
+# Model loading (GGUF weights are copied into the image by the Dockerfile)
+llm = AutoModelForCausalLM.from_pretrained("dolphin-2.0-mistral-7b.Q4_K_S.gguf",
+                                           model_type='mistral',
+                                           max_new_tokens=1096,
+                                           threads=3,
+                                           )
+
+# Pydantic object describing the request body
+class validation(BaseModel):
+    prompt: str
+
+# FastAPI app
+app = FastAPI()
+
+# Dolphin (ChatML-format) completion endpoint
+@app.post("/llm_on_cpu")
+async def stream(item: validation):
+    system_prompt = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.'
+    start, end = "<|im_start|>", "<|im_end|>"
+    # Build a ChatML prompt from the validated request and open the assistant turn
+    prompt = f"{start}system\n{system_prompt}{end}\n{start}user\n{item.prompt.strip()}{end}\n{start}assistant\n"
+    return llm(prompt)
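The endpoint accepts a JSON body matching the validation model and returns the raw completion string. A minimal client sketch using requests; the base URL is a placeholder for the deployed Space (port 7860 when running locally):

# Minimal client for the /llm_on_cpu endpoint; BASE_URL is a placeholder
import requests

BASE_URL = "http://localhost:7860"  # replace with the deployed Space URL

resp = requests.post(
    f"{BASE_URL}/llm_on_cpu",
    json={"prompt": "Explain what a GGUF file is in one sentence."},
    timeout=300,  # CPU inference can take a while
)
resp.raise_for_status()
print(resp.json())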
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+python-multipart
+fastapi
+pydantic
+uvicorn
+requests
+ctransformers