Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -66,26 +66,21 @@ import streamlit as st
|
|
66 |
#from llama_index.llms.ollama import Ollama
|
67 |
from myollama import Ollama
|
68 |
|
69 |
-
|
70 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
71 |
|
72 |
-
st.title("Paul Graham Information Fetcher")

# Text input for the query
query = st.text_input("Enter your query:", value="Who is Paul Graham?")

# Button to trigger the API call
if st.button("Get Response"):
    with st.spinner("Fetching response..."):
        try:
            # FIX: `llm` was used without ever being defined (NameError on
            # every click). Instantiate the model here so each request gets
            # a fresh client; failures fall through to the error banner.
            llm = Ollama(model="llama2", request_timeout=60.0)
            # Fetch the response from the model
            resp = llm.complete(query)
            # Display the response
            st.success("Response fetched successfully!")
            st.write(resp)
        except Exception as e:
            # Broad catch is deliberate: any backend failure is surfaced
            # in the UI instead of crashing the Streamlit script run.
            st.error(f"An error occurred: {e}")

# Run the Streamlit app
if __name__ == "__main__":
    # FIX: the guard had an empty body (SyntaxError). The script runs at
    # import time under Streamlit, so there is nothing extra to do here.
    pass
|
|
|
66 |
#from llama_index.llms.ollama import Ollama
|
67 |
from myollama import Ollama
|
68 |
|
69 |
+
def main():
    """Render the Streamlit UI and answer a prompt via the Ollama LLM.

    Reads a free-text prompt from the user; on button press, creates an
    Ollama client, requests a completion, and writes the result to the page.
    """
    st.title("Ollama LLM Demo")

    # Get user input (default prompt pre-filled for a quick demo)
    prompt = st.text_input("Enter your prompt:", "What is the capital of France?")

    if st.button("Get Response"):
        # Create an instance of the Ollama class.
        # NOTE(review): a new client is built on every click — presumably
        # cheap for this wrapper; confirm against myollama.Ollama.
        llm = Ollama(model="llama2", request_timeout=60.0)

        # Get the response from the LLM (blocking call, up to 60 s)
        response = llm.complete(prompt)

        # Display the response
        st.write(response)
84 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
# Script entry point: Streamlit executes this module top-to-bottom,
# so main() runs both under `streamlit run` and direct invocation.
if __name__ == "__main__":
    main()
|