import os
import threading
import subprocess

import gradio as gr
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage
# Start the Ollama server (blocking call, so it runs in its own thread)
def start_ollama():
    os.environ['OLLAMA_HOST'] = '127.0.0.1:11434'
    os.environ['OLLAMA_ORIGINS'] = '*'
    subprocess.run(["ollama", "serve"])

# Run the Ollama server in a separate thread so the Gradio app can start
ollama_thread = threading.Thread(target=start_ollama, daemon=True)
ollama_thread.start()
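# NOTE (assumption, not in the original Space): the llama3 weights must already
# be available to Ollama before the first request; if they are not baked into
# the image, they can be fetched once the server is up, e.g.:
#   subprocess.run(["ollama", "pull", "llama3"])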
# Initialize the Ollama model client
llm = Ollama(model="llama3", request_timeout=120.0)

def get_completion(prompt):
    # llm.complete() returns a CompletionResponse; hand its text back to Gradio
    response = llm.complete(prompt)
    return response.text

def chat_with_llm(messages):
    chat_messages = [ChatMessage(role=msg["role"], content=msg["content"]) for msg in messages]
    # llm.chat() returns a ChatResponse whose .message holds the assistant reply
    response = llm.chat(chat_messages)
    return response.message.content
def generate_response(prompt):
    return get_completion(prompt)

def generate_chat_response(history):
    # Rebuild the full message list from the user/assistant pairs in the chat history
    messages = [{"role": "system", "content": "You are a pirate with a colorful personality"}]
    for item in history:
        messages.append({"role": "user", "content": item[0]})
        if item[1]:
            messages.append({"role": "assistant", "content": item[1]})
    return chat_with_llm(messages)
# Tab 1: single-prompt completion
single_input = gr.Textbox(lines=2, placeholder="Enter your prompt here...")
single_output = gr.Textbox()
single_interface = gr.Interface(fn=generate_response, inputs=single_input, outputs=single_output, title="LLM Single Completion")

# Tab 2: chat (the Chatbot component supplies the history as user/assistant pairs)
chat_input = gr.Chatbot()
chat_output = gr.Textbox()
chat_interface = gr.Interface(fn=generate_chat_response, inputs=chat_input, outputs=chat_output, title="LLM Chat")

app = gr.TabbedInterface([single_interface, chat_interface], ["Single Completion", "Chat"])
if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860)
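# --- Hedged usage sketch (separate script, not part of the original Space) ---
# Once the app is running, the "Single Completion" tab can also be queried
# programmatically with gradio_client. The api_name below is an assumption;
# the exact endpoint names depend on the Gradio version, so check view_api().
#
# from gradio_client import Client
#
# client = Client("http://localhost:7860/")
# print(client.view_api())                                   # list the exposed endpoints
# print(client.predict("Tell me a joke", api_name="/predict"))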