holy-script committed
Commit c4447ff · verified · 1 Parent(s): 8e1a34f

Update app.py

Files changed (1)
  1. app.py +52 -52
app.py CHANGED
@@ -1,52 +1,52 @@
- import streamlit as st
- import requests
- import json
-
- # Set up Streamlit UI
- st.title("DeepSeek-R1 Chat")
-
- # Ollama API endpoint (Ensure Ollama is running on this port)
- OLLAMA_API_URL = "http://localhost:7860/api/generate"
-
- # User input
- prompt = st.text_area("Enter your prompt:")
-
- # Function to call the Ollama API with streaming support
- def generate_response(prompt, stream=True):
-     data = {
-         "model": "deepseek-r1:7b", # Keep the same model
-         "prompt": prompt,
-         "stream": stream # Enable streaming
-     }
-
-     try:
-         with requests.post(OLLAMA_API_URL, json=data, headers={"Content-Type": "application/json"}, stream=stream) as response:
-             response.raise_for_status()
-             if stream:
-                 for chunk in response.iter_lines(decode_unicode=True):
-                     if chunk:
-                         chunk_data = json.loads(chunk)
-                         yield chunk_data.get("response", "")
-             else:
-                 response_data = response.json()
-                 yield response_data.get("response", "No response received.")
-     except requests.exceptions.RequestException as e:
-         yield f"Error: {e}"
-
- string = ''
- for chunk in generate_response('hello!', stream=True):
-     string += chunk
-
- # Button to trigger response
- if st.button("Generate Response"):
-     if prompt.strip():
-         st.subheader("Ollama API Response:")
-         response_placeholder = st.empty() # Placeholder for streaming output
-
-         full_response = ""
-         for chunk in generate_response(prompt, stream=True):
-             full_response += chunk
-             response_placeholder.markdown(full_response)
-
-     else:
-         st.warning("Please enter a prompt.")
 
+ # import streamlit as st
+ # import requests
+ # import json
+
+ # # Set up Streamlit UI
+ # st.title("DeepSeek-R1 Chat")
+
+ # # Ollama API endpoint (Ensure Ollama is running on this port)
+ # OLLAMA_API_URL = "http://localhost:7860/api/generate"
+
+ # # User input
+ # prompt = st.text_area("Enter your prompt:")
+
+ # # Function to call the Ollama API with streaming support
+ # def generate_response(prompt, stream=True):
+ #     data = {
+ #         "model": "deepseek-r1:7b", # Keep the same model
+ #         "prompt": prompt,
+ #         "stream": stream # Enable streaming
+ #     }
+
+ #     try:
+ #         with requests.post(OLLAMA_API_URL, json=data, headers={"Content-Type": "application/json"}, stream=stream) as response:
+ #             response.raise_for_status()
+ #             if stream:
+ #                 for chunk in response.iter_lines(decode_unicode=True):
+ #                     if chunk:
+ #                         chunk_data = json.loads(chunk)
+ #                         yield chunk_data.get("response", "")
+ #             else:
+ #                 response_data = response.json()
+ #                 yield response_data.get("response", "No response received.")
+ #     except requests.exceptions.RequestException as e:
+ #         yield f"Error: {e}"
+
+ # # string = ''
+ # # for chunk in generate_response('hello!', stream=True):
+ # #     string += chunk
+
+ # # Button to trigger response
+ # if st.button("Generate Response"):
+ #     if prompt.strip():
+ #         st.subheader("Ollama API Response:")
+ #         response_placeholder = st.empty() # Placeholder for streaming output
+
+ #         full_response = ""
+ #         for chunk in generate_response(prompt, stream=True):
+ #             full_response += chunk
+ #             response_placeholder.markdown(full_response)
+
+ #     else:
+ #         st.warning("Please enter a prompt.")
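
For context, the code this commit disables streams completions from Ollama's /api/generate endpoint: with "stream": true, the server replies with newline-delimited JSON objects, each carrying a partial "response" string and a final object with "done": true. Below is a minimal standalone sketch of that same call, assuming an Ollama server is reachable at the URL app.py used (port 7860 here; a default local Ollama install listens on 11434 instead):

import json

import requests

# Endpoint copied from app.py; adjust if your Ollama server runs elsewhere.
OLLAMA_API_URL = "http://localhost:7860/api/generate"

def stream_generate(prompt):
    """Yield response fragments from Ollama's streaming /api/generate API."""
    payload = {"model": "deepseek-r1:7b", "prompt": prompt, "stream": True}
    with requests.post(OLLAMA_API_URL, json=payload, stream=True, timeout=60) as r:
        r.raise_for_status()
        # Each non-empty line of the body is one JSON object with a fragment.
        for line in r.iter_lines(decode_unicode=True):
            if not line:
                continue
            chunk = json.loads(line)
            yield chunk.get("response", "")
            if chunk.get("done"):  # final object signals end of stream
                break

if __name__ == "__main__":
    for fragment in stream_generate("hello!"):
        print(fragment, end="", flush=True)
    print()

Note that the removed version also iterated generate_response('hello!') at module level, which issues a blocking HTTP request on every Streamlit script rerun; the new revision leaves that test loop double-commented.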