# Importing libraries
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from quart import Quart, request
from llama_cpp import Llama
import psutil

# Initializing things
app = Quart(__name__)                                              # Quart app
llm = Llama(model_path="./model.bin")                              # LLaMA model
llama_model_name = "TheBloke/WizardLM-1.0-Uncensored-Llama2-13B-GGUF"  # model name (unused below; kept for reference)
translator_tokenizer = M2M100Tokenizer.from_pretrained(            # tokenizer for translator
    "facebook/m2m100_418M", cache_dir="translator/"
)
translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
    "facebook/m2m100_418M", cache_dir="translator/"
)
translator_model.eval()

# Preparing the translator
translator_tokenizer.src_lang = "en"
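
# A minimal standalone sketch of the M2M100 translation flow used in the
# /request handler below (illustration only; "ru" is just an example target):
#     encoded = translator_tokenizer("Hello, world!", return_tensors="pt")
#     tokens = translator_model.generate(
#         **encoded, forced_bos_token_id=translator_tokenizer.get_lang_id("ru")
#     )
#     translator_tokenizer.batch_decode(tokens, skip_special_tokens=True)[0]  # -> translated string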

# Loading prompt
with open('system.prompt', 'r', encoding='utf-8') as f:
    prompt = f.read()

# Defining the routes
@app.post("/request")
async def echo():
    try:
        data = await request.get_json()
        max_tokens = data.get("max_tokens", 64)
        if isinstance(data.get("system_prompt"), str):
            user_prompt = data.get("system_prompt") + "\n\nUser: " + data['request'] + "\nAssistant: "
        else:
            user_prompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
    except Exception:
        return {"error": "Not enough data", "output": "Oops! Error occurred! If you're a developer using this API, check the 'error' key."}, 400
    try:
        output = llm(user_prompt, max_tokens=max_tokens, stop=["User:", "\n"], echo=False)
        text = output["choices"][0]["text"]
        # Only certain target languages are allowed:
        # Russian (ru), Ukrainian (uk), Chinese (zh)
        if isinstance(data.get("target_lang"), str) and data.get("target_lang").lower() in ["ru", "uk", "zh"]:
            encoded_input = translator_tokenizer(text, return_tensors="pt")
            generated_tokens = translator_model.generate(
                **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(data.get("target_lang"))
            )
            translated_text = translator_tokenizer.batch_decode(
                generated_tokens, skip_special_tokens=True
            )[0]
            return {"output": text, "translated_output": translated_text}
        return {"output": text}
    except Exception as e:
        print(e)
        return {"error": str(e), "output": "Oops! Internal server error. Check the logs. If you're a developer, using this API, check 'error' key."}, 500

@app.get("/")
async def get():
    return '''<style>a:visited{color:black;}</style>
<h1>Hello, world!</h1>
This is a showcase of how to make your own server with a Llama2 model.<br>
I'm using a 7B model here just as an example, and only CPU power.<br>
But you can use GPU power as well!<br><br>
<h1>How to GPU?</h1>
Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"`</code> in the Dockerfile to <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. You can also try <code>`-DLLAMA_CLBLAST=on`</code> or <code>`-DLLAMA_METAL=on`</code>.<br><br>
<h1>How to test it on your own machine?</h1>
You can install Docker, build the image and run it. I made <code>`run-docker.sh`</code> for you. To stop the container, run <code>`docker ps`</code>, find the container's name and run <code>`docker stop _dockerContainerName_`</code>.<br>
Or you can follow the steps in the Dockerfile once and try it on your machine, outside of Docker.<br>
<br>''' + f"Memory used: {psutil.virtual_memory().percent}%<br>" + '''
<script>document.write("<b>URL of space:</b> "+window.location.href);</script>''' + '''
Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>, <a href="https://quart.palletsprojects.com/">Quart</a> and <a href="https://www.uvicorn.org/">Uvicorn</a>.<br><br>'''
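
# How this app can be served (a sketch; the Space itself presumably starts it
# from the Dockerfile — Uvicorn is the ASGI server credited on the page above,
# and port 7860 is an assumed default):
#     uvicorn app:app --host 0.0.0.0 --port 7860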