import os

import requests


def download(url):
    """Download the file at `url` into the models/ directory and return its filename."""
    filename = os.path.basename(url)
    os.makedirs('models', exist_ok=True)

    # Stream the response so large model files are not held in memory all at once.
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(f'models/{filename}', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024 * 1024):
            if chunk:
                f.write(chunk)
                f.flush()

    return filename


def list_model():
    """Return the filenames in the models/ directory, creating it if it does not exist."""
    os.makedirs('models', exist_ok=True)
    return os.listdir('models')


def set_model(filename):
    """Mark `filename` as the active model and write run.sh to launch the server with it."""
    if not os.path.exists(f'models/{filename}'):
        return False

    # Remember which model is currently selected.
    with open('models/set.txt', 'w', encoding='utf-8') as f:
        f.write(filename)

    # run.sh takes the port to listen on as its first argument ($1).
    cmd = f'venv/bin/python -m llama_cpp.server --model models/{filename} --port $1 --n_gpu_layers 999'
    with open('run.sh', 'w', encoding='utf-8') as f:
        f.write(cmd)

    return True
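

# Minimal usage sketch, assuming network access and that the placeholder URL below
# is replaced with a real model file; the generated run.sh is assumed to be started
# with the server port as its first argument, e.g. `bash run.sh 8000`.
if __name__ == '__main__':
    name = download('https://example.com/models/example.gguf')  # hypothetical URL
    print(list_model())
    if set_model(name):
        print(f'Wrote run.sh for {name}')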