|
import subprocess |
|
import time |
|
from transformers import AutoModelForCausalLM |
|
|
|
def download_model():
    """Pre-fetch the LLaVA v1.5-7B checkpoint into the local HF cache.

    Instantiating the model via ``from_pretrained`` forces the weights to
    be downloaded once, so the worker process launched later starts from a
    warm cache instead of downloading at serve time.

    NOTE(review): this also loads the full model into memory just to warm
    the cache, and it assumes ``AutoModelForCausalLM`` can resolve the
    llava model type — confirm the ``llava`` package registers it with
    transformers before this runs.
    """
    checkpoint = "liuhaotian/llava-v1.5-7b"
    AutoModelForCausalLM.from_pretrained(checkpoint, use_cache=True)
    print(f"Model {checkpoint} downloaded successfully.")
|
|
|
def start_controller(startup_wait=60):
    """Launch the LLaVA controller service in the background.

    Parameters
    ----------
    startup_wait : float, optional
        Seconds to block after spawning so the controller can finish
        binding port 10000 before dependent services try to register
        with it. Defaults to the original hard-coded 60.

    Returns
    -------
    subprocess.Popen
        Handle to the controller process so the caller can monitor or
        terminate it (the original discarded the handle).
    """
    proc = subprocess.Popen(
        ['python', '-m', 'llava.serve.controller',
         '--host', '0.0.0.0', '--port', '10000']
    )
    # Crude readiness wait; there is no health-check endpoint polled here.
    time.sleep(startup_wait)
    return proc
|
|
|
def start_gradio_web_server(startup_wait=60):
    """Launch the LLaVA Gradio web UI in the background.

    The server connects to the controller at ``http://localhost:10000``;
    ``--model-list-mode reload`` makes it re-query the controller for
    available workers, so workers registered later still show up.

    Parameters
    ----------
    startup_wait : float, optional
        Seconds to block after spawning so the web server can come up
        before the script continues. Defaults to the original
        hard-coded 60.

    Returns
    -------
    subprocess.Popen
        Handle to the web-server process so the caller can monitor or
        terminate it (the original discarded the handle).
    """
    proc = subprocess.Popen(
        ['python', '-m', 'llava.serve.gradio_web_server',
         '--controller', 'http://localhost:10000',
         '--model-list-mode', 'reload']
    )
    # Crude readiness wait; no health check is polled here.
    time.sleep(startup_wait)
    return proc
|
|
|
def start_model_worker():
    """Spawn the LLaVA model worker serving the v1.5-7B checkpoint.

    The worker listens on port 40000, advertises itself as
    ``http://localhost:40000``, and registers with the controller at
    ``http://localhost:10000``. The process runs in the background; its
    handle is not retained, matching the other launcher functions here.
    """
    worker_cmd = [
        'python', '-m', 'llava.serve.model_worker',
        '--host', '0.0.0.0',
        '--controller', 'http://localhost:10000',
        '--port', '40000',
        '--worker', 'http://localhost:40000',
        '--model-path', 'liuhaotian/llava-v1.5-7b',
    ]
    subprocess.Popen(worker_cmd)
|
|
|
if __name__ == "__main__":
    # Bring up the full LLaVA serving stack. Order matters: the
    # controller must be reachable on port 10000 before the web server
    # and the model worker try to talk to it.
    download_model()
    start_controller()
    start_gradio_web_server()
    start_model_worker()

    # Park the parent process so the backgrounded services stay alive
    # (e.g. inside a container whose PID 1 is this script). Exit cleanly
    # on Ctrl-C instead of dumping a KeyboardInterrupt traceback.
    try:
        while True:
            time.sleep(3600)
    except KeyboardInterrupt:
        pass
|
|