import streamlit as st
import requests
import json
# Set up the Streamlit UI
st.title("DeepSeek-R1 Chat")
# Ollama API endpoint (ensure Ollama is running on this port)
OLLAMA_API_URL = "http://localhost:7860/api/generate"
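# Note: Ollama's default port is 11434; port 7860 is assumed for this
# particular deployment. Adjust the URL if your instance uses the default.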
# User input
prompt = st.text_area("Enter your prompt:")
# Call the Ollama API with optional streaming support. When streaming,
# Ollama returns one JSON object per line; each object's "response" field
# carries the next fragment of generated text.
def generate_response(prompt, stream=True):
    data = {
        "model": "deepseek-r1:1.5b",  # Keep the same model
        "prompt": prompt,
        "stream": stream,  # Enable streaming
    }
    try:
        with requests.post(
            OLLAMA_API_URL,
            json=data,
            headers={"Content-Type": "application/json"},
            stream=stream,
        ) as response:
            response.raise_for_status()
            if stream:
                # Each non-empty line is a standalone JSON object
                for chunk in response.iter_lines(decode_unicode=True):
                    if chunk:
                        chunk_data = json.loads(chunk)
                        yield chunk_data.get("response", "")
            else:
                response_data = response.json()
                yield response_data.get("response", "No response received.")
    except requests.exceptions.RequestException as e:
        yield f"Error: {e}"
# Quick smoke test (run outside of Streamlit):
# string = ''
# for chunk in generate_response('hello!', stream=True):
#     string += chunk
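# Non-streaming usage sketch (assumes Ollama is reachable at the URL above;
# with stream=False the generator yields the full reply once):
# full_text = next(generate_response('hello!', stream=False))
# print(full_text)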
# Button to trigger response
if st.button("Generate Response"):
    if prompt.strip():
        st.subheader("Ollama API Response:")
        response_placeholder = st.empty()  # Placeholder for streaming output
        full_response = ""
        for chunk in generate_response(prompt, stream=True):
            full_response += chunk
            response_placeholder.markdown(full_response)
    else:
        st.warning("Please enter a prompt.")
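
# To run locally (assuming this file is saved as app.py and Ollama is
# serving at OLLAMA_API_URL):
#   streamlit run app.py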