RishabhBhardwaj committed on
Commit
ecf0f3e
1 Parent(s): 2ec2072

try fragments

Browse files
Files changed (1) hide show
  1. app.py +39 -18
app.py CHANGED
@@ -22,19 +22,15 @@ def load_model():
22
  return tokenizer, model
23
 
24
  # Function to load image from URL
25
- @st.cache_resource()
26
  def load_image_from_url(url):
27
  response = requests.get(url)
28
  img = Image.open(BytesIO(response.content))
29
  return img
30
 
31
- # Streamlit app
32
- st.title("Text Safety Evaluator")
33
-
34
- # User input
35
- user_input = st.text_area("Enter the text you want to evaluate:", height=100)
36
-
37
- if st.button("Evaluate"):
38
  if user_input:
39
  # Load model and tokenizer
40
  tokenizer, model = load_model()
@@ -53,20 +49,45 @@ if st.button("Evaluate"):
53
  prediction = 'unsafe' if 'unsafe' in output_decoded.lower() else 'safe'
54
 
55
  # Display results
56
- st.subheader("Evaluation Result:")
57
- st.write(f"The text is evaluated as: **{prediction.upper()}**")
58
-
59
  else:
60
- st.warning("Please enter some text to evaluate.")
 
61
 
62
- # Add logo at the bottom center
63
- col1, col2, col3 = st.columns([1,2,1])
64
- with col2:
 
 
 
 
 
 
 
 
 
 
 
 
65
  logo_url = "https://github.com/walledai/walledeval/assets/32847115/d8b1d14f-7071-448b-8997-2eeba4c2c8f6"
66
  logo = load_image_from_url(logo_url)
67
- st.image(logo, use_column_width=True, width=500) # Adjust the width as needed
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  # Add information about Walled Guard Advanced
70
  col1, col2, col3 = st.columns([1,2,1])
71
- with col2:
72
- st.info("For a more performant version, check out Walled Guard Advanced. Connect with us at admin@walled.ai for more information.")
 
22
  return tokenizer, model
23
 
24
  # Function to load image from URL
25
@st.cache_data()
def load_image_from_url(url):
    """Download an image over HTTP and decode it with PIL.

    Cached via ``st.cache_data`` so Streamlit reruns do not re-download
    the same URL.

    Args:
        url: HTTP(S) URL of the image to fetch.

    Returns:
        PIL.Image.Image: the decoded image.

    Raises:
        requests.HTTPError: if the server responds with an error status.
        requests.Timeout: if the server does not respond within 10 seconds.
    """
    # Without a timeout a stalled connection hangs the whole Streamlit
    # script; 10s is generous for a small logo asset.
    response = requests.get(url, timeout=10)
    # Fail loudly instead of handing an HTML error page to PIL.
    response.raise_for_status()
    img = Image.open(BytesIO(response.content))
    return img
30
 
31
+ # Evaluation fragment
32
+ @st.experimental_fragment
33
+ def evaluate_text(user_input, result_container):
 
 
 
 
34
  if user_input:
35
  # Load model and tokenizer
36
  tokenizer, model = load_model()
 
49
  prediction = 'unsafe' if 'unsafe' in output_decoded.lower() else 'safe'
50
 
51
  # Display results
52
+ with result_container:
53
+ st.subheader("Evaluation Result:")
54
+ st.write(f"The text is evaluated as: **{prediction.upper()}**")
55
  else:
56
+ with result_container:
57
+ st.warning("Please enter some text to evaluate.")
58
 
59
# --- Streamlit page: title, input, and evaluation trigger ---
st.title("Text Safety Evaluator")

# Text box for the content to be classified.
user_input = st.text_area("Enter the text you want to evaluate:", height=100)

# Placeholder that evaluate_text() later fills with the verdict.
result_container = st.empty()

# Run the evaluation fragment only when the user asks for it.
clicked = st.button("Evaluate")
if clicked:
    evaluate_text(user_input, result_container)
70
+
71
# Logo fragment
@st.experimental_fragment
def display_logo(logo_container):
    """Download the Walled AI logo and render it into *logo_container*.

    Runs as a Streamlit fragment so reruns of this piece of UI do not
    re-execute the whole script.

    Args:
        logo_container: a Streamlit container (e.g. ``st.empty()``) to
            draw the image into.
    """
    logo_url = "https://github.com/walledai/walledeval/assets/32847115/d8b1d14f-7071-448b-8997-2eeba4c2c8f6"
    logo = load_image_from_url(logo_url)
    with logo_container:
        # use_column_width=True takes precedence over a fixed width in
        # st.image, so the original width=500 argument was dead and has
        # been removed (no behavior change).
        st.image(logo, use_column_width=True)
78
+
79
# Info fragment
@st.experimental_fragment
def display_info(info_container):
    """Render the Walled Guard Advanced notice inside *info_container*."""
    message = (
        "For a more performant version, check out Walled Guard Advanced. "
        "Connect with us at admin@walled.ai for more information."
    )
    with info_container:
        st.info(message)
84
+
85
# --- Footer row 1: logo, centered via a 1-2-1 column split ---
_, logo_col, _ = st.columns([1, 2, 1])
logo_container = logo_col.empty()
display_logo(logo_container)

# --- Footer row 2: Walled Guard Advanced notice, same centering ---
_, info_col, _ = st.columns([1, 2, 1])
info_container = info_col.empty()
display_info(info_container)