Update app.py
Browse files
app.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import streamlit as st
|
2 |
import logging
|
|
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
4 |
|
5 |
# Set the logger to display only CRITICAL messages
|
@@ -29,10 +30,13 @@ st.title("Interact with Your Model")
|
|
29 |
user_input = st.text_area("Enter your prompt:", "")
|
30 |
|
31 |
if st.button("Submit"):
|
32 |
-
if user_input:
|
33 |
generated_text = generate_text(user_input)
|
34 |
-
|
35 |
-
|
|
|
|
|
|
|
36 |
else:
|
37 |
st.write("Please enter a prompt to get started.")
|
38 |
|
|
|
import streamlit as st
import logging
import time
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Set the logger to display only CRITICAL messages
# ... (model setup and generate_text() are defined between these hunks,
#      outside the visible diff context)

user_input = st.text_area("Enter your prompt:", "")

if st.button("Submit"):
    # .strip() so a whitespace-only prompt is treated as empty input
    # instead of being sent to the model.
    if user_input.strip():
        generated_text = generate_text(user_input)
        # NOTE(review): output_placeholder is not defined in the visible
        # lines of this diff — presumably created earlier via st.empty();
        # verify it exists before this loop runs.
        for i in range(1, len(generated_text) + 1):
            # Update the output placeholder with a substring of the
            # generated text, producing a "typewriter" effect.
            output_placeholder.markdown(generated_text[:i])
            # Sleep to simulate typing
            time.sleep(0.1)  # Adjust the sleep time to speed up or slow down the "typing"
    else:
        st.write("Please enter a prompt to get started.")