toaster61 committed · Commit d4735f7 · 1 parent: a61e98e
smol fixes

Files changed:
- gradio_app.py   +4 -3
- quart_app.py    +0 -72 (deleted)
- system.prompt   +5 -2
gradio_app.py CHANGED
@@ -59,9 +59,9 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
     try:
         maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
         if isinstance(custom_prompt, str):
-            userPrompt = custom_prompt
+            userPrompt = custom_prompt.replace("{prompt}", request)
         else:
-            userPrompt = prompt
+            userPrompt = prompt.replace("{prompt}", request)
         logs += f"\nFinal prompt: {userPrompt}\n"
     except:
         return "Not enough data! Check that you passed all needed data.", logs
@@ -78,7 +78,8 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
     counter += 1
     logs += f"Final attempt: {counter}\n"
 
-
+
+    if language in languages and language != "en":
         logs += f"\nTranslating from en to {language}"
         encoded_input = translator_tokenizer(text, return_tensors="pt")
         generated_tokens = translator_model.generate(
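
The substance of the gradio_app.py change: the prompt is now a template carrying a "{prompt}" placeholder that is filled by plain string substitution, and the translation step is gated on a supported non-English language. A minimal sketch of the new prompt handling, assuming a system.prompt file like the one added in this commit (build_user_prompt is a hypothetical helper name, not one from the repo):

    # Fill the "{prompt}" placeholder in the template with the user's request.
    with open("system.prompt", "r", encoding="utf-8") as f:
        prompt = f.read()

    def build_user_prompt(request: str, custom_prompt=None) -> str:
        # Prefer a caller-supplied template, falling back to the bundled one,
        # exactly as the diff above does.
        template = custom_prompt if isinstance(custom_prompt, str) else prompt
        return template.replace("{prompt}", request)

    print(build_user_prompt("What is the capital of France?"))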
quart_app.py DELETED
@@ -1,72 +0,0 @@
-# Importing libraries
-from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
-from quart import Quart, request
-from llama_cpp import Llama
-import psutil
-
-# Initing things
-app = Quart(__name__) # Quart app
-llm = Llama(model_path="./model.bin") # LLaMa model
-llama_model_name = "TheBloke/WizardLM-1.0-Uncensored-Llama2-13B-GGUF"
-translator_tokenizer = M2M100Tokenizer.from_pretrained( # tokenizer for translator
-    "facebook/m2m100_418M", cache_dir="translator/"
-)
-translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
-    "facebook/m2m100_418M", cache_dir="translator/"
-)
-translator_model.eval()
-
-# Preparing things to work
-translator_tokenizer.src_lang = "en"
-
-# Loading prompt
-with open('system.prompt', 'r', encoding='utf-8') as f:
-    prompt = f.read()
-
-# Defining
-@app.post("/request")
-async def echo():
-    try:
-        data = await request.get_json()
-        maxTokens = data.get("max_tokens", 64)
-        if isinstance(data.get("system_prompt"), str):
-            userPrompt = data.get("system_prompt") + "\n\nUser: " + data['request'] + "\nAssistant: "
-        else:
-            userPrompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
-    except:
-        return {"error": "Not enough data", "output": "Oops! Error occured! If you're a developer, using this API, check 'error' key."}, 400
-    try:
-        output = llm(userPrompt, max_tokens=maxTokens, stop=["User:", "\n"], echo=False)
-        text = output["choices"][0]["text"]
-        # i allowed only certain languages:
-        # russian (ru), ukranian (uk), chinese (zh)
-        if isinstance(data.get("target_lang"), str) and data.get("target_lang").lower() in ["ru", "uk", "zh"]:
-            encoded_input = translator_tokenizer(output, return_tensors="pt")
-            generated_tokens = translator_model.generate(
-                **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(data.get("target_lang"))
-            )
-            translated_text = translator_tokenizer.batch_decode(
-                generated_tokens, skip_special_tokens=True
-            )[0]
-            return {"output": text, "translated_output": translated_text}
-
-        return {"output": text}
-    except Exception as e:
-        print(e)
-        return {"error": str(e), "output": "Oops! Internal server error. Check the logs. If you're a developer, using this API, check 'error' key."}, 500
-
-@app.get("/")
-async def get():
-    return '''<style>a:visited{color:black;}</style>
-<h1>Hello, world!</h1>
-This is showcase how to make own server with Llama2 model.<br>
-I'm using here 7b model just for example. Also here's only CPU power.<br>
-But you can use GPU power as well!<br><br>
-<h1>How to GPU?</h1>
-Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS`</code> in Dockerfile on <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. Also you can try <code>`DLLAMA_CLBLAST`</code> or <code>`DLLAMA_METAL`</code>.<br><br>
-<h1>How to test it on own machine?</h1>
-You can install Docker, build image and run it. I made <code>`run-docker.sh`</code> for ya. To stop container run <code>`docker ps`</code>, find name of container and run <code>`docker stop _dockerContainerName_`</code><br>
-Or you can once follow steps in Dockerfile and try it on your machine, not in Docker.<br>
-<br>''' + f"Memory used: {psutil.virtual_memory()[2]}<br>" + '''
-<script>document.write("<b>URL of space:</b> "+window.location.href);</script>''' + '''
-Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>, <a href="https://quart.palletsprojects.com/">Quart</a> and <a href="https://www.uvicorn.org/">Uvicorn</a>.<br><br>'''
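
Note that the deleted endpoint had a latent bug the Gradio app avoids: it passed the whole llama-cpp output dict (output) to the translator tokenizer instead of the extracted text string. The translation step itself, which gradio_app.py keeps, can be sketched standalone like this (the model name and language codes come from the repo; the input string is a placeholder):

    # Translate an English string with M2M100, as both apps do.
    from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    model.eval()
    tokenizer.src_lang = "en"

    text = "Hello! How can I help you?"  # placeholder for the model's answer
    encoded = tokenizer(text, return_tensors="pt")
    tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ru"))
    print(tokenizer.batch_decode(tokens, skip_special_tokens=True)[0])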
system.prompt CHANGED
@@ -1,5 +1,8 @@
 You're an AI assistant named Alex. You're friendly and respectful.
 You speak as briefly, clearly and to the point as possible.
-You know many languages, for example:
+You know many languages, for example: English, Russian.
 You don't have access to the internet, so rely on your knowledge.
-If you are not sure of your answer, say so, but try not to misinform the user.
+If you are not sure of your answer, say so, but try not to misinform the user.
+
+USER: {prompt}
+ASSISTANT:
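
With the USER/ASSISTANT scaffold baked into system.prompt, the template plus the "{prompt}" substitution from gradio_app.py yields a complete llama-cpp prompt. An end-to-end sketch (the max_tokens value and the "USER:" stop sequence are assumptions for illustration, not values confirmed by this commit; the model path is the one the deleted quart_app.py used):

    from llama_cpp import Llama

    llm = Llama(model_path="./model.bin")  # path as in the deleted quart_app.py
    with open("system.prompt", "r", encoding="utf-8") as f:
        template = f.read()

    final_prompt = template.replace("{prompt}", "Hi! Who are you?")
    # Stopping on "USER:" assumes the model would otherwise continue the dialogue.
    output = llm(final_prompt, max_tokens=64, stop=["USER:"], echo=False)
    print(output["choices"][0]["text"])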