chat_test_2 / app.py
import atexit
import json
import subprocess
import requests
import time
import socket
import gradio as gr
# Check whether the LLM server is accepting connections on the given host/port
def is_server_active(host, port):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex((host, port)) == 0
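# Example: is_server_active("0.0.0.0", 2600) returns True once the server
# launched below accepts TCP connections (a pure connect test, no HTTP involved).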
# Download the GGUF model weights from Hugging Face
url = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_0.gguf?download=true"
response = requests.get(url, stream=True)
response.raise_for_status()  # fail fast if the download did not succeed
with open("./model.gguf", mode="wb") as file:
    # Stream to disk in 1 MiB chunks rather than holding the ~4 GB file in RAM
    for chunk in response.iter_content(chunk_size=1 << 20):
        file.write(chunk)
print("Model downloaded")
# Launch the llama.cpp HTTP server and keep a handle to the process
command = ["python3", "-m", "llama_cpp.server", "--model", "./model.gguf", "--host", "0.0.0.0", "--port", "2600", "--n_threads", "2"]
server_process = subprocess.Popen(command)  # stored so the process can be terminated later
print("Model server starting...")
# Block until the server is accepting connections
while not is_server_active("0.0.0.0", 2600):
    print("Waiting for server to start...")
    time.sleep(5)
print("Model server is ready!")
def response(message, history):
    url = "http://localhost:2600/v1/completions"
    body = {"prompt": "[INST]" + message + "[/INST]", "max_tokens": 1024, "echo": False, "stream": False}
    response_text = ""
    try:
        stream_response = requests.post(url, json=body, stream=True, timeout=60)
        for text_chunk in stream_response.iter_content(chunk_size=None):
            text = text_chunk.decode('utf-8')
            # Print the raw response for debugging
            print("Raw response:", text)
            # Strip the SSE prefix if the server answered in streaming form
            if text.startswith("data: "):
                text = text.replace("data: ", "")
            if text.startswith("{") and "choices" in text:
                try:
                    response_json = json.loads(text)
                    part = response_json["choices"][0]["text"]
                    print(part, end="", flush=True)
                    response_text += part
                except json.JSONDecodeError as e:
                    print("Error decoding JSON:", e)
                    break
            elif text.strip():
                print("Non-JSON response:", text)
                break
    except requests.exceptions.RequestException as e:
        print(f"Request error: {e}")
    yield response_text
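# A token-streaming variant (a sketch, assuming the server follows the OpenAI
# SSE convention: "data: {json}" events terminated by "data: [DONE]"). Yielding
# the accumulated text after each chunk lets gr.ChatInterface render the reply
# progressively; swap fn=response for fn=response_streaming below to try it.
def response_streaming(message, history):
    body = {"prompt": "[INST]" + message + "[/INST]", "max_tokens": 1024, "stream": True}
    accumulated = ""
    with requests.post("http://localhost:2600/v1/completions", json=body, stream=True, timeout=60) as reply:
        for line in reply.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue  # skip blank SSE separators and keep-alives
            payload = line[len("data: "):]
            if payload == "[DONE]":
                break
            accumulated += json.loads(payload)["choices"][0].get("text", "")
            yield accumulated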
# Ensure the server process is terminated when the app shuts down
def cleanup_server():
    print("Closing server...")
    server_process.terminate()  # ask the server process to exit
    server_process.wait()       # wait until it has actually terminated
    print("Server closed.")
# Gradio interface configuration
gr_interface = gr.ChatInterface(
    fn=response,
    title="Mistral-7B-Instruct-v0.2-GGUF Eugenio Schiavoni Chatbot",
    theme='syddharth/gray-minimal'
)
# Register the cleanup step so the server is shut down when the app exits
# (assigning to gr.Interface.cleanup is never invoked; atexit actually runs)
atexit.register(cleanup_server)
gr_interface.launch(share=True)