Entz committed on
Commit 8fbe745
1 Parent(s): 0246830

Upload 2 files

Files changed (2)
  1. app.py +90 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,90 @@
+ import os
+ import threading
+ import time
+ import subprocess
+
+ print("Expanding user path for Ollama")
+ OLLAMA = os.path.expanduser("~/ollama")
+
+ print("Checking if Ollama exists at the path")
+ if not os.path.exists(OLLAMA):
+     print("Ollama not found, downloading it")
+     subprocess.run("curl -L https://ollama.com/download/ollama-linux-amd64 -o ~/ollama", shell=True)
+     os.chmod(OLLAMA, 0o755)
+
+ def ollama_service_thread():
+     print("Starting Ollama service thread")
+     subprocess.run("~/ollama serve", shell=True)
+
+ print("Creating and starting Ollama service thread")
+ OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread)
+ OLLAMA_SERVICE_THREAD.start()
+
+ print("Giving Ollama serve a moment to start")
+ time.sleep(10)
+
+ print("Setting model to 'gemma2'")
+ model = "gemma2"
+
+ print(f"Pulling model {model}")
+ subprocess.run(f"~/ollama pull {model}", shell=True)
+
+ ################################################
+ ################################################
+ import copy
+ import gradio as gr
+ from ollama import Client
+
+ print("Initializing Ollama client")
+ client = Client(host='http://localhost:11434', timeout=120)
+
+ print("Getting Hugging Face token and model ID from environment variables")
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ MODEL_ID = os.environ.get("MODEL_ID", "google/gemma-2-9b-it")
+ MODEL_NAME = MODEL_ID.split("/")[-1]
+
+ print("Setting up title and description for Gradio interface")
+ TITLE = "<h1><center>ollama-Chat</center></h1>"
+ DESCRIPTION = f"""
+ <h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
+ <p>Running on Ollama backend.</p>
+ """
+
+
+ CSS = """
+ .duplicate-button {
+     margin: auto !important;
+     color: white !important;
+     background: black !important;
+     border-radius: 100vh !important;
+ }
+ h3 {
+     text-align: center;
+ }
+ """
+ import streamlit as st
+ from llama_index.llms.ollama import Ollama
+
+ # Initialize the Ollama-backed LLM from llama_index
+ llm = Ollama(model="llama3", request_timeout=120.0)
+
+ st.title("Paul Graham Information Fetcher")
+
+ # Text input for the query
+ query = st.text_input("Enter your query:", value="Who is Paul Graham?")
+
+ # Button to trigger the API call
+ if st.button("Get Response"):
+     with st.spinner("Fetching response..."):
+         try:
+             # Fetch the completion from the model
+             resp = llm.complete(query)
+             # Display the response text
+             st.success("Response fetched successfully!")
+             st.write(resp.text)
+         except Exception as e:
+             st.error(f"An error occurred: {e}")
+
+ # Run the app from the command line with `streamlit run app.py`;
+ # Streamlit executes this script top to bottom on each interaction,
+ # so no explicit entry point (e.g. a __main__ guard) is needed.
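
Note that app.py imports gradio and defines TITLE, DESCRIPTION, CSS, and an Ollama client, but never assembles them into an interface. A minimal sketch of how those pieces could be wired up if appended to app.py; the chat_fn helper and the pair-style history handling are assumptions, not part of the commit:

# Hypothetical wiring, not in this commit: build a chat UI from the
# otherwise-unused Gradio constants and Ollama client defined above.
def chat_fn(message, history):
    # gr.ChatInterface passes history as [user, assistant] pairs by default
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    reply = client.chat(model=model, messages=messages)
    return reply["message"]["content"]

with gr.Blocks(css=CSS) as demo:
    gr.HTML(TITLE)
    gr.HTML(DESCRIPTION)
    gr.ChatInterface(fn=chat_fn)

demo.launch()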
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ ollama
+ streamlit
+ llama-index-llms-ollama
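
Once these three packages are installed, a quick sanity check that the stack is usable (a sketch; it assumes the Ollama server started by app.py is running on the default port and that llama3 has been pulled):

# Smoke test sketch, not part of the commit: verify all three
# requirements import and the local Ollama server is reachable.
import streamlit  # import check only
from ollama import Client
from llama_index.llms.ollama import Ollama

print(Client(host="http://localhost:11434").list())  # locally pulled models
print(Ollama(model="llama3", request_timeout=120.0).complete("Say hi"))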