# Importing libraries
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from quart import Quart, request
from llama_cpp import Llama

# Initializing things
app = Quart(__name__)                                   # Quart app
llm = Llama(model_path="./model.bin")                   # LLaMA model
tokenizer = M2M100Tokenizer.from_pretrained(            # tokenizer for translator
    "facebook/m2m100_1.2B", cache_dir="translator/"
)
model = M2M100ForConditionalGeneration.from_pretrained( # translator model
    "facebook/m2m100_1.2B", cache_dir="translator/"
)
model.eval()

# Setting the translator's source language
tokenizer.src_lang = "en"
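
# NOTE: the M2M100 translator loaded above is never called by the routes below.
# The helper here is only an illustrative sketch of how it could be used; the
# function name and the default target language are assumptions, not part of
# the original app.
def translate(text: str, target_lang: str = "ru") -> str:
    encoded = tokenizer(text, return_tensors="pt")
    generated = model.generate(
        **encoded, forced_bos_token_id=tokenizer.get_lang_id(target_lang)
    )
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]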

# Loading prompt
with open('system.prompt', 'r', encoding='utf-8') as f:
    prompt = f.read()

# Defining routes
@app.post("/request")
async def echo():
    try:
        data = await request.get_json()
        maxTokens = data.get("max_tokens", 64)
        if isinstance(data.get("system_prompt"), str):
            userPrompt = data.get("system_prompt") + "\n\nUser: " + data['request'] + "\nAssistant: "
        else:
            userPrompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
    except (KeyError, TypeError):
        return {"error": "Not enough data", "output": "Oops! Error occurred! If you're a developer using this API, check the 'error' key."}, 400
    try:
        output = llm(userPrompt, max_tokens=maxTokens, stop=["User:", "\n"], echo=False)
        return {"output": output["choices"][0]["text"]}
    except Exception as e:
        print(e)
        return {"error": str(e), "output": "Oops! Internal server error. Check the logs. If you're a developer, using this API, check 'error' key."}, 500

@app.get("/")
async def get():
    return '''<style>a:visited{color:black;}</style>
<h1>Hello, world!</h1>
This is a showcase of how to make your own server with a Llama 2 model.<br>
I'm using a 7B model here just as an example, and it runs on CPU only.<br>
But you can use GPU power as well!<br>
<h1>How to GPU?</h1>
Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"`</code> in the Dockerfile to <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. You can also try <code>`DLLAMA_CLBLAST`</code> or <code>`DLLAMA_METAL`</code>.<br>
Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>, <a href="https://quart.palletsprojects.com/">Quart</a> and <a href="https://www.uvicorn.org/">Uvicorn</a>.<br>
<h1>How to test it on your own machine?</h1>
You can install Docker, build the image and run it. I made <code>`run-docker.sh`</code> for ya. To stop the container, run <code>`docker ps`</code>, find the container's name and run <code>`docker stop _dockerContainerName_`</code>.<br>
Or you can follow the steps from the Dockerfile once on your own machine, without Docker.<br>
<br>
<script>document.write("<b>URL of space:</b> "+window.location.href);</script>'''
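
# How to run and query this server (a minimal sketch; the module name "app" and
# port 7860 are assumptions, not taken from this file; check the Dockerfile for
# the actual launch command):
#
#   uvicorn app:app --host 0.0.0.0 --port 7860
#
#   curl -X POST http://localhost:7860/request \
#        -H "Content-Type: application/json" \
#        -d '{"request": "Hello!", "max_tokens": 64}'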