# Importing libraries
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from llama_cpp import Llama
import gradio as gr
import psutil
# Initializing everything
print("! DOWNLOADING TOKENIZER AND SETTING EVERYTHING UP !")
translator_tokenizer = M2M100Tokenizer.from_pretrained(  # tokenizer for the translator
    "facebook/m2m100_418M", cache_dir="translator/"
)
print("! DOWNLOADING MODEL AND SETTING ALL UP !")
translator_model = M2M100ForConditionalGeneration.from_pretrained( # translator model
"facebook/m2m100_418M", cache_dir="translator/"
)
print("! SETTING MODEL IN EVALUATION MODE !")
translator_model.eval()
print("! INITING LLAMA MODEL !")
llm = Llama(model_path="./model.bin") # LLaMa model
llama_model_name = "TheBloke/WizardLM-1.0-Uncensored-Llama2-13B-GGUF"
print("! INITING DONE !")
# Preparing things to work
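# The LLM answers in English; M2M100 then translates that answer into the
# requested target language, so the translator's source language is fixed to English.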
translator_tokenizer.src_lang = "en"
title = "llama.cpp API"
desc = '''
Hello, world!
This is a showcase of how to build your own server around a Llama 2 model.
I'm using a 13B model here just as an example, and it runs on CPU power only.
But you can use GPU power as well!
How to use a GPU?
Change `CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS"` in the Dockerfile
to `CMAKE_ARGS="-DLLAMA_CUBLAS=on"`. You can also try `DLLAMA_CLBLAST` or `DLLAMA_METAL`.
Powered by llama-cpp-python and Gradio.
How to test it on your own machine?
You can install Docker, build the image and run it. I made `run-docker.sh` for you.
To stop the container, run `docker ps`, find the container's name and run `docker stop _dockerContainerName_`.
Or you can follow the steps from the Dockerfile once and try it directly on your machine, without Docker.
''' + f"Memory used: {psutil.virtual_memory().percent}%\n"
'''
# Defining languages for the translator (I just chose languages that I think are popular!)
ru - Russian
uk - Ukrainian
zh - Chinese
de - German
fr - French
hi - Hindi
it - Italian
ja - Japanese
es - Spanish
ar - Arabic
'''
languages = ["ru", "uk", "zh", "de", "fr", "hi", "it", "ja", "es", "ar"]
# Loading prompt
with open('system.prompt', 'r', encoding='utf-8') as f:
    prompt = f.read()
def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
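    """Generate an answer with the Llama model and optionally translate it with M2M100.

    A max_tokens value outside the 16-256 range falls back to 64. If language is one of
    the supported codes, the English answer is translated into it; otherwise the English
    text is returned unchanged. Returns a (text, logs) tuple for the two Gradio outputs.
    """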
logs = f"Request: {request}\nMax tokens: {max_tokens}\nLanguage: {language}\nCustom prompt: {custom_prompt}\n"
try:
maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
if isinstance(custom_prompt, str):
userPrompt = custom_prompt + "\n\nUser: " + request + "\nAssistant: "
else:
userPrompt = prompt + "\n\nUser: " + request + "\nAssistant: "
except:
return "Not enough data! Check that you passed all needed data.", logs
    try:
        output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False)
        text = output["choices"][0]["text"]
        if language in languages:
            logs += f"\nTranslating from en to {language}"
            encoded_input = translator_tokenizer(text, return_tensors="pt")
            generated_tokens = translator_model.generate(
                **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
            )
            translated_text = translator_tokenizer.batch_decode(
                generated_tokens, skip_special_tokens=True
            )[0]
            logs += f"\nTranslated: {translated_text}\nOriginal: {text}"
            return translated_text, logs
        logs += f"\nOriginal: {text}"
        return text, logs
    except Exception as e:
        print(e)
        return "Oops! Internal server error. Check the logs of the space/instance.", logs
print("\n\n\n")
print("! LOAD GRADIO INTERFACE !")
demo = gr.Interface(
    fn=generate_answer,
    inputs=[
        gr.components.Textbox(label="Input"),
        gr.components.Number(label="Max tokens", value=256),
        gr.components.Dropdown(label="Target Language", value="en", choices=["en"] + languages),
        gr.components.Textbox(label="Custom system prompt"),
    ],
    outputs=[
        gr.components.Textbox(label="Output"),
        gr.components.Textbox(label="Logs")
    ],
    title=title,
    description=desc,
    allow_flagging='never'
)
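# queue() routes requests through Gradio's queue so long generations don't overlap;
# server_name="0.0.0.0" binds to all interfaces so the app is reachable from outside the container.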
demo.queue()
print("! LAUNCHING GRADIO !")
demo.launch(server_name="0.0.0.0")