# ollama_chatbotTTS.py
# Ollama-backed chatbot that produces short, TTS-friendly replies.
# (Header reconstructed from Hugging Face page residue: repo "xd", author
# "jnjj", commit 3a7237d.)
import ollama
import json
from pydantic import BaseModel
class ChatResponse(BaseModel):
    """Structured-output schema: the model must emit a JSON object
    with a single string field, ``response``."""

    response: str
class OllamaChat:
    """Chat wrapper around an Ollama model, tuned for short TTS-friendly replies.

    Fix: removed the stray class-level ``response: str`` annotation — it was a
    copy-paste leftover from ``ChatResponse``; a bare annotation creates no
    attribute and only misleads readers about this class's state.
    """

    def __init__(self):
        """Initialize the Ollama model and pull it locally if needed."""
        # Quantized community model hosted on Hugging Face.
        self.model = "hf.co/jnjj/vcvcvcv:Q4_0"
        self.system_prompt = (
            "You are a concise and natural-sounding assistant. "
            "Answer questions briefly, in one or two sentences at most, "
            "as if responding for text-to-speech (TTS). Keep it natural and conversational."
        )
        # Best-effort pull: if the model cannot be downloaded (offline, bad
        # name, daemon down), report it and keep going — the actual chat call
        # will surface the real error later.
        try:
            print(f"Descargando el modelo {self.model}…")
            ollama.pull(model=self.model)
            print("Modelo descargado correctamente.")
        except Exception as e:
            print(f"No se pudo descargar el modelo: {e}")

    def get_response(self, user_input: str) -> str:
        """Send ``user_input`` to the model and return only the reply text.

        The chat call constrains the model's output to the ``ChatResponse``
        JSON schema, so the returned message content is a JSON object with a
        ``"response"`` field.

        Raises:
            json.JSONDecodeError: if the model emits malformed JSON.
            KeyError: if the JSON lacks a ``"response"`` field.
        """
        result = ollama.chat(
            model=self.model,
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": user_input},
            ],
            # Force structured output matching ChatResponse.
            format=ChatResponse.model_json_schema(),
        )
        return json.loads(result["message"]["content"])["response"]
def _main() -> None:
    """Example usage: ask one question on stdin and print the model's reply."""
    chat = OllamaChat()
    question = input("Enter your question: ")
    print("\nOllama Response:\n", chat.get_response(question))


if __name__ == "__main__":
    _main()