jed-tiotuico committed on
Commit 595c6e5
1 Parent(s): 01c683d

fixed form

Files changed (1): app.py (+18, -13)
app.py CHANGED
@@ -148,6 +148,12 @@ Now it's your turn, ensure to only generate one message
 
 st.header("ReplyCaddy")
 st.write("AI-powered customer support assistant. Reduces anxiety when responding to customer support on social media.")
+st.markdown("""
+Instructions:
+1. Click the Generate Customer Message using Few Shots button to generate a custom message
+2. Then click Generate Polite and Friendly Response
+3. Or Enter a custom message in the text box and click Generate Polite and Friendly Response
+""")
 # image https://github.com/unslothai/unsloth/blob/main/images/made%20with%20unsloth.png?raw=true
 # st.write("Made with [Unsloth](https://github.com/unslothai/unsloth/blob/main/images/made%20with%20unsloth.png?raw=true")
 
@@ -296,25 +302,13 @@ if st.button("Generate Customer Message using Few Shots"):
     new_customer_msg = write_stream_user_chat_message(user_chat, model, tokenizer, few_shot_prompt)
     st.session_state["user_msg_as_prompt"] = new_customer_msg
 
-
-    st.markdown("------------")
-    st.markdown("<p>Thanks to:</p>", unsafe_allow_html=True)
-    st.markdown("""Unsloth https://github.com/unslothai check out the [wiki](https://github.com/unslothai/unsloth/wiki)""")
-    st.markdown("""Georgi Gerganov's ggml https://github.com/ggerganov/ggml""")
-    st.markdown("""Meta's Llama https://github.com/meta-llama""")
-    st.markdown("""Mistral AI - https://github.com/mistralai""")
-    st.markdown("""Zhang Peiyuan's TinyLlama https://github.com/jzhang38/TinyLlama""")
-    st.markdown("""Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois,
-    Xuechen Li, Carlos Guestrin, Percy Liang, Tatsunori B. Hashimoto
-    - [Alpaca: A Strong, Replicable Instruction-Following Model](https://crfm.stanford.edu/2023/03/13/alpaca.html)""")
-
 # main ui prompt
 # - text box
 # - submit
 with st.form(key="my_form"):
     customer_msg = st.text_area("Customer Message")
     write_user_chat_message(user_chat, customer_msg)
-    if st.form_submit_button("Submit"):
+    if st.form_submit_button("Submit and Generate Response"):
         st.session_state["user_msg_as_prompt"] = customer_msg
         write_user_chat_message(user_chat, customer_msg)
         model, tokenizer = get_model_tokenizer(sota_model_name)
@@ -329,6 +323,17 @@ with st.form(key="my_form"):
         )
     )
 
+st.markdown("------------")
+st.markdown("<p>Thanks to:</p>", unsafe_allow_html=True)
+st.markdown("""Unsloth https://github.com/unslothai check out the [wiki](https://github.com/unslothai/unsloth/wiki)""")
+st.markdown("""Georgi Gerganov's ggml https://github.com/ggerganov/ggml""")
+st.markdown("""Meta's Llama https://github.com/meta-llama""")
+st.markdown("""Mistral AI - https://github.com/mistralai""")
+st.markdown("""Zhang Peiyuan's TinyLlama https://github.com/jzhang38/TinyLlama""")
+st.markdown("""Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois,
+Xuechen Li, Carlos Guestrin, Percy Liang, Tatsunori B. Hashimoto
+- [Alpaca: A Strong, Replicable Instruction-Following Model](https://crfm.stanford.edu/2023/03/13/alpaca.html)""")
+
 if True:
     gpu_stats = torch.cuda.get_device_properties(0)
     max_memory = gpu_stats.total_memory / 1024 ** 3
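For context on the form fix above, here is a minimal, self-contained sketch of Streamlit's st.form / st.form_submit_button pattern, assuming standard Streamlit behavior; the app's own helpers (write_user_chat_message, get_model_tokenizer, the model call) are intentionally omitted and the placeholder names below are illustrative only. Widgets inside a form are batched, so editing the text area does not rerun the generation logic; the values only become actionable on the run where st.form_submit_button(...) returns True, which is why the response-generation code sits under that branch.

import streamlit as st

with st.form(key="demo_form"):
    # Widgets inside a form are batched: typing here does not trigger a rerun.
    customer_msg = st.text_area("Customer Message")
    # Returns True only on the script run triggered by pressing the button.
    submitted = st.form_submit_button("Submit and Generate Response")

if submitted:
    # Persist the prompt across reruns, mirroring the app's "user_msg_as_prompt" key.
    st.session_state["user_msg_as_prompt"] = customer_msg
    st.write(f"Generating a polite and friendly response to: {customer_msg}")

Run with `streamlit run app.py`; keeping the expensive generation under the submit branch avoids re-running the model on every widget interaction.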