Upload 7 files
Browse files- app.py +27 -0
- fn.py +31 -0
- install.bat +56 -0
- main.py +42 -0
- refresh.py +23 -0
- requirements.txt +3 -0
- venv.sh +10 -0
app.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import fn
import gradio as gr
from refresh import create_refresh_button

# Management UI for a llama-cpp-python server:
# download a GGUF model by URL, pick one from models/, and activate it.
with gr.Blocks() as demo:
    title = gr.Markdown('# llama-cpp-python server manager')
    info = gr.Markdown()
    url = gr.Textbox(label='URL', interactive=True)
    download_button = gr.Button(value='Download')
    model = gr.Dropdown(label='Model', choices=fn.list_model(), interactive=True)
    # 🔄 button re-reads models/ so freshly downloaded files appear in the dropdown
    create_refresh_button(gr, model, lambda: None, lambda: {'choices': fn.list_model()}, 'refresh-button', interactive=True)
    set_model_button = gr.Button(value='Set Model')

    # download writes into models/ and reports the saved filename in `info`
    download_button.click(
        fn=fn.download,
        inputs=[url],
        outputs=[info],
    )

    # set_model records the selection and regenerates run.sh (no UI output)
    set_model_button.click(
        fn=fn.set_model,
        inputs=[model],
        outputs=[],
    )

if __name__ == '__main__':
    demo.launch()
fn.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
|
def download(url):
    """Download a model file from *url* into the models/ directory.

    The local filename is the basename of the URL path. Streams the body
    in 1 MiB chunks so large GGUF files are never fully held in memory.

    Returns the saved filename; raises requests.HTTPError on a non-2xx
    response instead of silently saving an error page as a "model".
    """
    filename = os.path.basename(url)
    # make sure the target directory exists; don't rely on list_model()
    # having been called first
    os.makedirs('models', exist_ok=True)
    r = requests.get(url, stream=True, timeout=30)
    r.raise_for_status()
    with open(f'models/{filename}', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024*1024):
            if chunk:
                f.write(chunk)

    return filename
def list_model():
    """Return the filenames currently present in the models/ directory.

    Creates the directory on first use, so callers always get a list
    (possibly empty) rather than an error.
    """
    os.makedirs('models', exist_ok=True)
    return os.listdir('models')
def set_model(filename):
    """Activate *filename* (a file inside models/) as the served model.

    Records the selection in models/set.txt and regenerates run.sh with
    the llama-cpp-python server command ($1 is the port, substituted by
    the shell when run.sh is invoked).

    Returns False when models/<filename> does not exist, True otherwise.
    """
    if not os.path.exists(f'models/{filename}'):
        return False

    # remember the selection so the UI / server can restore it later
    with open('models/set.txt', 'w', encoding='utf-8') as f:
        f.write(filename)

    # regenerate the launch script for the newly selected model
    cmd = f'venv/bin/python -m llama_cpp.server --model models/{filename} --port $1 --n_gpu_layers 999'
    with open('run.sh', 'w', encoding='utf-8') as f:
        f.write(cmd)

    return True
install.bat
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@echo off

rem -------------------------------------------
rem Windows installer: clones the app, unpacks a standalone CPython,
rem creates a venv and installs dependencies.
rem NOT guaranteed to work on Windows

set APPDIR=lcps
set REPOS=https://huggingface.co/spaces/aka7774/%APPDIR%
set VENV=venv

rem -------------------------------------------

set INSTALL_DIR=%~dp0
cd /d %INSTALL_DIR%

:git_clone
rem Try the system git first; fall back to PortableGit if the clone failed.
set DL_URL=%REPOS%
set DL_DST=%APPDIR%
git clone %DL_URL% %APPDIR%
if exist %DL_DST% goto install_python

set DL_URL=https://github.com/git-for-windows/git/releases/download/v2.41.0.windows.3/PortableGit-2.41.0.3-64-bit.7z.exe
set DL_DST=PortableGit-2.41.0.3-64-bit.7z.exe
curl -L -o %DL_DST% %DL_URL%
if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
%DL_DST% -y
del %DL_DST%

set GIT=%INSTALL_DIR%PortableGit\bin\git
%GIT% clone %REPOS%

:install_python
rem Standalone CPython 3.10; the install_only archive extracts into .\python\
set DL_URL=https://github.com/indygreg/python-build-standalone/releases/download/20240107/cpython-3.10.13+20240107-i686-pc-windows-msvc-shared-install_only.tar.gz
set DL_DST="%INSTALL_DIR%python.tar.gz"
curl -L -o %DL_DST% %DL_URL%
if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
tar -xzf %DL_DST%

set PYTHON=%INSTALL_DIR%python\python.exe
rem FIX: was python310\Scripts, but the archive extracts to python\ (see PYTHON above)
set PATH=%PATH%;%INSTALL_DIR%python\Scripts

:install_venv
cd %APPDIR%
%PYTHON% -m venv %VENV%
set PYTHON=%VENV%\Scripts\python.exe

:install_pip
set DL_URL=https://bootstrap.pypa.io/get-pip.py
set DL_DST=%INSTALL_DIR%get-pip.py
curl -o %DL_DST% %DL_URL%
if not exist %DL_DST% bitsadmin /transfer dl %DL_URL% %DL_DST%
%PYTHON% %DL_DST%

%PYTHON% -m pip install gradio
%PYTHON% -m pip install -r requirements.txt

pause
main.py
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import sys
import time
import io
from PIL import Image

from fastapi import FastAPI, Request, status, Form, UploadFile
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from fastapi.exceptions import RequestValidationError
from fastapi.responses import Response

import fn
import gradio as gr
from app import demo

# REST API around fn.py, with the gradio UI mounted under /gradio.
app = FastAPI()

# Wide-open CORS -- presumably intended for local / trusted use only.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

gr.mount_gradio_app(app, demo, path="/gradio")

@app.post("/download")
async def api_download(url: str):
    """Download a model from *url* into models/ and report its filename."""
    filename = fn.download(url)
    return {"status": 0, "filename": filename}

@app.get("/list_model")
async def api_list_model():
    """List the model files currently present in models/."""
    return {"status": 0, "models": fn.list_model()}

@app.post("/set_model")
async def api_set_model(model: str):
    """Activate *model*; status 1 means the file was not found."""
    # fn.set_model returns False when the file is missing -- surface that
    # instead of unconditionally reporting success.
    ok = fn.set_model(model)
    return {"status": 0 if ok else 1}
refresh.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|

def create_refresh_button(gr, refresh_component, refresh_method, refreshed_args, elem_class, interactive=True):
    """
    Build a small 🔄 button that refreshes *refresh_component* in place.

    *refreshed_args* is either a dict of component kwargs or a callable
    producing one; its entries are applied to the component and returned
    as a gr.update so the UI picks them up.

    Copied from https://github.com/AUTOMATIC1111/stable-diffusion-webui
    """
    refresh_symbol = '🔄'

    def refresh():
        refresh_method()
        args = refreshed_args() if callable(refreshed_args) else refreshed_args
        for key, value in args.items():
            setattr(refresh_component, key, value)
        return gr.update(**(args or {}))

    refresh_button = gr.Button(refresh_symbol, elem_classes=elem_class, interactive=interactive)
    refresh_button.click(fn=refresh, inputs=[], outputs=[refresh_component])
    return refresh_button
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn
|
3 |
+
python-multipart
|
venv.sh
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/usr/bin/bash
# Create the virtualenv and install llama-cpp-python (CUDA build) plus app deps.

python3 -m venv venv
curl -kL https://bootstrap.pypa.io/get-pip.py | venv/bin/python

# Build llama-cpp-python against cuBLAS; force a clean rebuild from source.
CMAKE_ARGS="-DLLAMA_CUBLAS=on" venv/bin/python -m pip install llama-cpp-python --upgrade --force-reinstall --no-cache-dir
venv/bin/python -m pip install 'llama-cpp-python[server]'

venv/bin/python -m pip install gradio
venv/bin/python -m pip install -r requirements.txt