import os
import threading
import time
import subprocess
print("Expanding user path for Ollama")
OLLAMA = os.path.expanduser("~/ollama")
print("Checking if Ollama exists at the path")
if not os.path.exists(OLLAMA):
    print("Ollama not found, downloading it")
    subprocess.run("curl -L https://ollama.com/download/ollama-linux-amd64 -o ~/ollama", shell=True)
    os.chmod(OLLAMA, 0o755)
def ollama_service_thread():
    print("Starting Ollama service thread")
    subprocess.run("~/ollama serve", shell=True)
print("Creating and starting Ollama service thread")
OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread)
OLLAMA_SERVICE_THREAD.start()
print("Giving Ollama serve a moment to start")
time.sleep(10)
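# Hedged sketch (not in the original): poll the Ollama HTTP endpoint rather than
# trusting the fixed sleep alone. Assumes the default port 11434, since nothing
# in this script sets OLLAMA_HOST.
import urllib.request
for _ in range(30):
    try:
        urllib.request.urlopen("http://localhost:11434", timeout=2)
        print("Ollama server is responding")
        break
    except OSError:
        time.sleep(1)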
print("Setting model to 'llama3'")
model = "llama3"
print(f"Pulling model {model}")
subprocess.run(f"~/ollama pull {model}", shell=True)
################################################
################################################
import copy
import gradio as gr
from ollama import Client
print("Initializing Ollama client")
client = Client(host='http://localhost:11434', timeout=60)  # `ollama serve` binds 11434 by default
print("Getting Hugging Face token and model ID from environment variables")
HF_TOKEN = os.environ.get("HF_TOKEN", None)
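# Note: HF_TOKEN is read here but never used below; only MODEL_ID/MODEL_NAME
# are used, and only for display in the interface header.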
MODEL_ID = os.environ.get("MODEL_ID", "meta-llama/Meta-Llama-3-8B")
MODEL_NAME = MODEL_ID.split("/")[-1]
print("Setting up title and description for Gradio interface")
TITLE = "<h1><center>ollama-Chat</center></h1>"
DESCRIPTION = f"""
<h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
<p>Running on Ollama backend.</p>
"""
CSS = """
.duplicate-button {
margin: auto !important;
color: white !important;
background: black !important;
border-radius: 100vh !important;
}
h3 {
text-align: center;
}
"""
import streamlit as st
#from llama_index.llms.ollama import Ollama
from myollama import Ollama
# Initialize the Ollama model
llm = Ollama(model="llama3", request_timeout=120.0)
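# Assumption: `myollama` mirrors the llama_index Ollama interface it replaces
# (see the commented import above), where complete(prompt) returns a printable
# completion object.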
st.title("Paul Graham Information Fetcher")
# Text input for the query
query = st.text_input("Enter your query:", value="Who is Paul Graham?")
# Button to trigger the API call
if st.button("Get Response"):
    with st.spinner("Fetching response..."):
        try:
            # Fetch the response from the model
            resp = llm.complete(query)
            # Display the response
            st.success("Response fetched successfully!")
            st.write(resp)
        except Exception as e:
            st.error(f"An error occurred: {e}")
# Streamlit executes this script top to bottom; there is no st.run().
# Launch the app with: streamlit run app.py