Update app.py
Browse files
app.py
CHANGED
@@ -5,6 +5,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
5 |
# Set the logger to display only CRITICAL messages
|
6 |
logging.basicConfig(level=logging.CRITICAL)
|
7 |
|
|
|
8 |
# Cache the model and tokenizer to avoid reloading it every time
|
9 |
@st.experimental_singleton
|
10 |
def load_model():
|
@@ -27,11 +28,15 @@ st.title("Interact with Your Model")
|
|
27 |
# User input
|
28 |
user_input = st.text_area("Enter your prompt:", "")
|
29 |
|
30 |
-
|
31 |
-
if user_input:
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
else:
|
36 |
-
|
|
|
37 |
|
|
|
|
|
|
|
|
5 |
# Set the logger to display only CRITICAL messages
|
6 |
logging.basicConfig(level=logging.CRITICAL)
|
7 |
|
8 |
+
|
9 |
# Cache the model and tokenizer to avoid reloading it every time
|
10 |
@st.experimental_singleton
|
11 |
def load_model():
|
|
|
28 |
# User input
|
29 |
user_input = st.text_area("Enter your prompt:", "")
|
30 |
|
31 |
+
# Handle the submit action: run the model on a non-empty prompt,
# otherwise ask the user to type something first.
if st.button("Submit"):
    if not user_input:
        st.write("Please enter a prompt.")
    else:
        # Run generation only when a prompt was actually provided.
        output = generate_text(user_input)
        st.write(output)
|
38 |
+
|
39 |
|
40 |
+
In here, I want to get markdown-formatted or real-time (streamed) responses — how can I do that?
|
41 |
+
ChatGPT
|
42 |
+
To display real-time or interactive responses in a Streamlit application, you might want to leverage Streamlit's widgets and its ability to rerun the script whenever an input changes. However, for markdown or dynamically updating the UI based on the model's output, you'll need to adjust your approach slightly.
|