Spaces:
Runtime error
Commit 4381f13 · 1 Parent(s): 271190b
Delete main.py
main.py DELETED
@@ -1,85 +0,0 @@
-import fastapi
-import json
-import markdown
-import uvicorn
-from fastapi.responses import HTMLResponse
-from fastapi.middleware.cors import CORSMiddleware
-from sse_starlette.sse import EventSourceResponse
-from ctransformers import AutoModelForCausalLM
-from ctransformers.langchain import CTransformers
-from pydantic import BaseModel
-
-llm = AutoModelForCausalLM.from_pretrained("TheBloke/gorilla-7B-GGML",
-                                           model_file="Gorilla-7B.ggmlv3.q4_0.bin",
-                                           model_type="llama")
-app = fastapi.FastAPI()
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-@app.get("/")
-async def index():
-    html_content = """
-    <!DOCTYPE html>
-    <html>
-    <style>
-    body {
-        font-family: "Arial";
-    }
-    h1 {
-        text-align: "center";
-    }
-    </style>
-    <body>
-        <h1>gorilla</h1>
-        <input id="prompt" type="text">
-        <button id="search">I'm feeling lucky</button>
-        <div id="content"></div>
-        <script>
-        document.getElementById("search").addEventListener("click", () => {
-            let prompt = document.getElementById("prompt").value;
-            let source = new EventSource(`https://matthoffner-gorilla.hf.space/stream?prompt=${prompt}`);
-            source.onmessage = function(event) {
-                console.log(event);
-                let eventData = event.data;
-                document.getElementById("content").innerHTML += eventData
-            };
-        });
-        </script>
-    </body>
-    </html>
-    """
-    return HTMLResponse(content=html_content, status_code=200)
-
-@app.get("/stream")
-async def chat(prompt = "I want to download a dataset from GCS"):
-    tokens = llm.tokenize(prompt)
-    async def server_sent_events(chat_chunks, llm):
-        yield prompt
-        for chat_chunk in llm.generate(chat_chunks):
-            yield llm.detokenize(chat_chunk)
-        yield ""
-
-    return EventSourceResponse(server_sent_events(tokens, llm))
-
-
-class ChatCompletionRequest(BaseModel):
-    messages: str
-
-@app.post("/v1/chat/completions")
-async def chat(request: ChatCompletionRequest, response_mode=None):
-    tokens = llm.tokenize(request.messages.join(' '))
-    async def server_sent_events(chat_chunks, llm):
-        for chat_chunk in llm.generate(chat_chunks):
-            yield llm.detokenize(chat_chunk)
-        yield ""
-
-    return EventSourceResponse(server_sent_events(tokens, llm))
-
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
-
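For reference, a minimal sketch of consuming the /stream endpoint defined in the deleted main.py outside the browser's EventSource. It assumes the httpx client library and that the Space URL hard-coded in the deleted HTML (matthoffner-gorilla.hf.space) is still reachable; neither assumption comes from this commit.

import httpx

# Hypothetical client sketch (not part of this repository): reads the
# text/event-stream produced by sse_starlette's EventSourceResponse and
# prints each "data:" payload (one detokenized chunk per event).
def stream_completion(prompt: str) -> None:
    url = "https://matthoffner-gorilla.hf.space/stream"
    with httpx.stream("GET", url, params={"prompt": prompt}, timeout=None) as response:
        for line in response.iter_lines():
            if line.startswith("data: "):
                print(line[len("data: "):], end="", flush=True)

if __name__ == "__main__":
    stream_completion("I want to download a dataset from GCS")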