Abbeite committed on
Commit
688ce9d
1 Parent(s): 2bbef44

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -7
app.py CHANGED
@@ -5,6 +5,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
5
  # Set the logger to display only CRITICAL messages
6
  logging.basicConfig(level=logging.CRITICAL)
7
 
 
8
  # Cache the model and tokenizer to avoid reloading it every time
9
  @st.experimental_singleton
10
  def load_model():
@@ -27,11 +28,15 @@ st.title("Interact with Your Model")
27
  # User input
28
  user_input = st.text_area("Enter your prompt:", "")
29
 
30
- # Display the submit button and the output directly without needing to press a button
31
- if user_input: # This will update every time the user_input changes
32
- generated_text = generate_text(user_input)
33
- # Display as markdown
34
- st.markdown(generated_text)
35
- else:
36
- st.write("Please enter a prompt to get started.")
 
37
 
 
 
 
 
5
  # Set the logger to display only CRITICAL messages
6
  logging.basicConfig(level=logging.CRITICAL)
7
 
8
+
9
  # Cache the model and tokenizer to avoid reloading it every time
10
  @st.experimental_singleton
11
  def load_model():
 
28
  # User input
29
  user_input = st.text_area("Enter your prompt:", "")
30
 
31
+ if st.button("Submit"):
32
+ if user_input:
33
+ # Generate text based on the input
34
+ generated_text = generate_text(user_input)
35
+ st.write(generated_text)
36
+ else:
37
+ st.write("Please enter a prompt.")
38
+
39
 
40
+ In here, I want to get markdown-formatted or real-time responses — how can I do that?
41
+ ChatGPT
42
+ To display real-time or interactive responses in a Streamlit application, you might want to leverage Streamlit's widgets and its ability to rerun the script whenever an input changes. However, for markdown or dynamically updating the UI based on the model's output, you'll need to adjust your approach slightly.