# Importing libraries
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from quart import Quart, request
from llama_cpp import Llama

# Initing things
app = Quart(__name__)  # Quart app
llm = Llama(model_path="./model.bin")  # LLaMa model
tokenizer = M2M100Tokenizer.from_pretrained(  # tokenizer for translator
    "facebook/m2m100_1.2B", cache_dir="translator/"
)
model = M2M100ForConditionalGeneration.from_pretrained(  # translator model
    "facebook/m2m100_1.2B", cache_dir="translator/"
)
model.eval()

# Preparing things to work
tokenizer.src_lang = "en"

# Loading prompt
with open('system.prompt', 'r', encoding='utf-8') as f:
    prompt = f.read()

# Defining
@app.route("/request", methods=["POST"])  # NOTE: decorator missing in the original snippet; the route path is an assumption
async def echo():
    try:
        data = await request.get_json()
        maxTokens = data.get("max_tokens", 64)
        if isinstance(data.get("system_prompt"), str):
            userPrompt = data.get("system_prompt") + "\n\nUser: " + data['request'] + "\nAssistant: "
        else:
            userPrompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
    except Exception:
        return {"error": "Not enough data", "output": "Oops! An error occurred! If you're a developer using this API, check the 'error' key."}, 400
    try:
        output = llm(userPrompt, max_tokens=maxTokens, stop=["User:", "\n"], echo=False)
        return {"output": output["choices"][0]["text"]}
    except Exception as e:
        print(e)
        return {"error": str(e), "output": "Oops! Internal server error. Check the logs. If you're a developer using this API, check the 'error' key."}, 500

@app.route("/")  # index page (decorator missing in the original snippet)
async def get():
    return '''<style>a:visited{color:black;}</style>
<h1>Hello, world!</h1>
This is a showcase of how to make your own server with a Llama 2 model.<br>
I'm using a 7B model here just as an example. Also, only CPU power is used here.<br>
But you can use GPU power as well!<br>
| <h1>How to GPU?</h1> | |
| Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS`</code> in Dockerfile on <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. Also you can try <code>`DLLAMA_CLBLAST`</code>, <code>`DLLAMA_METAL`</code> or <code>`DLLAMA_METAL`</code>.<br> | |
Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a>, <a href="https://quart.palletsprojects.com/">Quart</a> and <a href="https://www.uvicorn.org/">Uvicorn</a>.<br>
| <h1>How to test it on own machine?</h1> | |
| You can install Docker, build image and run it. I made <code>`run-docker.sh`</code> for ya. To stop container run <code>`docker ps`</code>, find name of container and run <code>`docker stop _dockerContainerName_`</code><br> | |
| Or you can once follow steps in Dockerfile and try it on your machine, not in Docker.<br> | |
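For reference, a minimal sketch of the manual commands (the image name and port here are just placeholders; check run-docker.sh and the Dockerfile for the real ones):<br>
<code>docker build -t llama-server . && docker run -p 7860:7860 llama-server</code><br>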
| <br> | |
| <script>document.write("<b>URL of space:</b> "+window.location.href);</script>''' |