# VisionZip / app_backup.py
import subprocess
import time
from transformers import AutoModelForCausalLM

def download_model():
    # Pre-download the LLaVA checkpoint into the local Hugging Face cache so the
    # model worker does not have to fetch it on its first request.
    model_name = "liuhaotian/llava-v1.5-7b"
    AutoModelForCausalLM.from_pretrained(model_name, use_cache=True)
    print(f"Model {model_name} downloaded successfully.")

def start_controller():
    # Launch the LLaVA controller, which tracks registered model workers and
    # routes requests from the web server to them.
    subprocess.Popen(['python', '-m', 'llava.serve.controller', '--host', '0.0.0.0', '--port', '10000'])
    time.sleep(60)  # wait for the controller to start

def start_gradio_web_server():
    # Launch the Gradio front end, pointing it at the controller on port 10000;
    # '--model-list-mode reload' refreshes the model list instead of reading it only once.
    subprocess.Popen(['python', '-m', 'llava.serve.gradio_web_server', '--controller', 'http://localhost:10000', '--model-list-mode', 'reload'])
    time.sleep(60)  # wait for the web server to start
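
# The fixed 60-second sleeps above are a blunt startup wait. A readiness-check sketch,
# assuming the requests package is available (wait_for_http is a hypothetical helper,
# not part of LLaVA): poll the service URL until it answers or a timeout expires, e.g.
# wait_for_http('http://localhost:10000') after launching the controller.
def wait_for_http(url, timeout=120, interval=2):
    import requests
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            requests.get(url, timeout=5)  # any HTTP response means the server is up
            return True
        except requests.exceptions.ConnectionError:
            time.sleep(interval)
    return False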

def start_model_worker():
    # Launch the worker that hosts llava-v1.5-7b and registers itself with the controller.
    subprocess.Popen(['python', '-m', 'llava.serve.model_worker', '--host', '0.0.0.0', '--controller', 'http://localhost:10000', '--port', '40000', '--worker', 'http://localhost:40000', '--model-path', 'liuhaotian/llava-v1.5-7b'])

if __name__ == "__main__":
    download_model()           # download the checkpoint first
    start_controller()         # start the controller process
    start_gradio_web_server()  # start the Gradio web server process
    start_model_worker()       # start the model worker process

    # Keep the script running so the spawned subprocesses stay alive.
    while True:
        time.sleep(3600)  # sleep for 1 hour; adjust as needed