Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,11 @@
|
|
1 |
import streamlit as st
|
2 |
import logging
|
3 |
-
import time
|
4 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
|
5 |
|
6 |
# Set the logger to display only CRITICAL messages
|
7 |
logging.basicConfig(level=logging.CRITICAL)
|
8 |
|
9 |
-
|
10 |
# Cache the model and tokenizer to avoid reloading it every time
|
11 |
@st.experimental_singleton
|
12 |
def load_model():
|
@@ -31,13 +30,20 @@ user_input = st.text_area("Enter your prompt:", "")
|
|
31 |
|
32 |
if st.button("Submit"):
|
33 |
if user_input:
|
|
|
34 |
generated_text = generate_text(user_input)
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
else:
|
41 |
-
st.write("Please enter a prompt
|
42 |
-
|
43 |
-
|
|
|
1 |
import streamlit as st
|
2 |
import logging
|
|
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
4 |
+
import time
|
5 |
|
6 |
# Set the logger to display only CRITICAL messages
|
7 |
logging.basicConfig(level=logging.CRITICAL)
|
8 |
|
|
|
9 |
# Cache the model and tokenizer to avoid reloading it every time
|
10 |
@st.experimental_singleton
|
11 |
def load_model():
|
|
|
# --- Submit handler: generate text from the prompt and render it with a
# --- simulated "typing" effect.
if st.button("Submit"):
    if user_input:
        # Generate text based on the input
        generated_text = generate_text(user_input)

        # Use ONE placeholder so each iteration REPLACES the previous
        # partial string. The original loop called st.write() per character,
        # which stacks len(generated_text) separate output elements instead
        # of animating a single growing line.
        placeholder = st.empty()

        # Simulate typing effect: reveal one more character per step.
        # NOTE(review): the original also parked the partial string in
        # st.session_state['text_progress'] and deleted it at the end, so no
        # state ever survived the rerun — a local slice is equivalent.
        for i in range(len(generated_text)):
            placeholder.write(generated_text[: i + 1])
            time.sleep(0.05)  # Adjust as needed for the desired typing speed
    else:
        st.write("Please enter a prompt.")
|
|
|
|