AI Agent
Initial commit: Ollama server with Gradio interface
240219c
import gradio as gr
import re
import subprocess
from typing import Any, Dict, List
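
# Assumption: the `ollama` CLI is installed and on PATH, and the Ollama
# daemon (`ollama serve`) is already running in the background; every
# helper below shells out to that CLI via subprocess.
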
def list_models() -> List[Dict[str, Any]]:
    """List available Ollama models by parsing `ollama list` output."""
    try:
        result = subprocess.run(
            ["ollama", "list"],
            capture_output=True,
            text=True,
            check=True,
        )
        models = []
        for line in result.stdout.splitlines():
            # Skip the header row and blank lines.
            if not line.strip() or line.startswith("NAME"):
                continue
            # `ollama list` output is column-aligned with spaces, so split
            # on runs of two or more whitespace characters rather than tabs.
            parts = re.split(r"\s{2,}", line.strip())
            if len(parts) < 4:
                continue
            name, model_id, size, modified = parts[:4]
            models.append({
                "name": name,
                "id": model_id,
                "size": size,
                "modified": modified,
            })
        return models
    except Exception as e:
        return [{"error": str(e)}]
def pull_model(model_name: str) -> str:
    """Pull an Ollama model."""
    try:
        result = subprocess.run(
            ["ollama", "pull", model_name],
            capture_output=True,
            text=True,
            check=True,
        )
        return f"Successfully pulled model: {model_name}\n{result.stdout}"
    except subprocess.CalledProcessError as e:
        return f"Error while pulling the model: {e.stderr}"
    except Exception as e:
        return f"Unexpected error: {e}"
def run_model(prompt: str, model_name: str = "llama3") -> str:
    """Run an Ollama model with a prompt."""
    try:
        result = subprocess.run(
            ["ollama", "run", model_name, prompt],
            capture_output=True,
            text=True,
            check=True,
            timeout=60,  # 1-minute timeout so a hung model cannot block the UI
        )
        return result.stdout
    except subprocess.TimeoutExpired:
        return f"Model '{model_name}' did not respond within 60 seconds."
    except subprocess.CalledProcessError as e:
        return f"Error while running the model:\n{e.stderr}"
    except Exception as e:
        return f"Unexpected error: {e}"
def get_model_info(model_name: str) -> str:
    """Get information about a specific Ollama model."""
    try:
        result = subprocess.run(
            ["ollama", "show", model_name],
            capture_output=True,
            text=True,
            check=True,
        )
        return result.stdout
    except Exception as e:
        return f"Error while fetching model info: {e}"
with gr.Blocks(title="Ollama Server Example", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🦙 Ollama Server Example\nA simple Ollama server on Hugging Face Spaces with a Gradio interface")

    with gr.Tabs():
        with gr.Tab("🔄 Run Model"):
            with gr.Row():
                model_input = gr.Textbox(
                    label="Model name (default: llama3)",
                    value="llama3",
                    interactive=True,
                )
                prompt_input = gr.Textbox(
                    label="Prompt",
                    placeholder="Enter text...",
                    lines=3,
                )
            run_btn = gr.Button("🚀 Run Model", variant="primary")
            output_text = gr.Textbox(label="Output", lines=10)

            run_btn.click(
                fn=run_model,
                inputs=[prompt_input, model_input],
                outputs=output_text,
            )

        with gr.Tab("📦 Pull Model"):
            with gr.Row():
                pull_model_input = gr.Textbox(
                    label="Model name to pull (e.g. llama3, mistral)",
                    placeholder="llama3",
                )
            pull_btn = gr.Button("💾 Pull Model", variant="secondary")
            pull_output = gr.Textbox(label="Pull status", lines=5)

            pull_btn.click(
                fn=pull_model,
                inputs=pull_model_input,
                outputs=pull_output,
            )

        with gr.Tab("📊 Model List"):
            refresh_btn = gr.Button("🔄 Refresh Model List", variant="primary")
            models_table = gr.JSON(label="Available models")

            refresh_btn.click(
                fn=list_models,
                outputs=models_table,
            )

            # Populate the model list when the app starts.
            demo.load(
                fn=list_models,
                outputs=models_table,
            )

        with gr.Tab("ℹ️ Model Info"):
            with gr.Row():
                info_model_input = gr.Textbox(
                    label="Model name",
                    value="llama3",
                )
            info_btn = gr.Button("📋 Get Info", variant="secondary")
            info_output = gr.Textbox(label="Model information", lines=15)

            info_btn.click(
                fn=get_model_info,
                inputs=info_model_input,
                outputs=info_output,
            )

if __name__ == "__main__":
    demo.launch()
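
# Deployment note: on Hugging Face Spaces the default demo.launch() is
# usually sufficient; if an explicit bind is ever needed, Gradio accepts
# demo.launch(server_name="0.0.0.0", server_port=7860).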