Konstantin committed on
Commit bd8327d · 1 Parent(s): 091e9bd

Add spaces application

Files changed (4)
  1. .gitignore +3 -0
  2. README.md +3 -3
  3. app.py +149 -0
  4. requirements.txt +4 -0
.gitignore CHANGED
@@ -3,3 +3,6 @@
 !.gitignore
 !.gitattributes
 !README.md
+
+!app.py
+!requirements.txt
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
-title: Toxic Comments German
-emoji: 📚
+title: Toxic Comment Detection German
+emoji: 🤬
 colorFrom: red
 colorTo: gray
 sdk: streamlit
@@ -26,7 +26,7 @@ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gr
 Can be either `gradio`, `streamlit`, or `static`
 
 `sdk_version` : _string_
-Only applicable for `streamlit` SDK.
+Only applicable for `streamlit` SDK.
 See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
 
 `app_file`: _string_
app.py ADDED
@@ -0,0 +1,149 @@
+import random
+
+import streamlit as st
+from bs4 import BeautifulSoup
+
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from transformers import pipeline
+from transformers_interpret import SequenceClassificationExplainer
+
+
+model_hub_url = 'https://huggingface.co/ml6team/distilbert-base-german-cased-toxic-comments'
+model_name = 'ml6team/distilbert-base-german-cased-toxic-comments'
+
+about_page_markdown = f"""# 🤬 Toxic Comment Detection Space
+
+Made by [ML6](https://ml6.eu/).
+
+Token attribution is performed using [transformers-interpret](https://github.com/cdpierse/transformers-interpret).
+"""
+
+regular_emojis = [
+    '😐', '🙂', '👶', '😇',
+]
+undecided_emojis = [
+    '🤨', '🧐', '🥸', '🥴', '🤷',
+]
+potty_mouth_emojis = [
+    '🤐', '👿', '😡', '🤬', '☠️', '☣️', '☢️',
+]
+
+# Page setup
+st.set_page_config(
+    page_title="Toxic Comment Detection Space",
+    page_icon="🤬",
+    layout="centered",
+    initial_sidebar_state="auto",
+    menu_items={
+        'Get help': None,
+        'Report a bug': None,
+        'About': about_page_markdown,
+    }
+)
+
+# Model setup
+@st.cache(allow_output_mutation=True,
+          suppress_st_warning=True,
+          show_spinner=False)
+def load_pipeline():
+    with st.spinner('Loading the model (this might take a while)...'):
+        toxicity_pipeline = pipeline(
+            'text-classification',
+            model=model_name,
+            tokenizer=model_name)
+        cls_explainer = SequenceClassificationExplainer(
+            toxicity_pipeline.model,
+            toxicity_pipeline.tokenizer)
+    return toxicity_pipeline, cls_explainer
+
+toxicity_pipeline, cls_explainer = load_pipeline()
+
+
+# Auxiliary functions
+def format_explainer_html(html_string):
+    """Extract tokens with attribution-based background color."""
+    soup = BeautifulSoup(html_string, 'html.parser')
+    p = soup.new_tag('p')
+    # Select token elements and remove model-specific tokens
+    for token in soup.find_all('td')[-1].find_all('mark')[1:-1]:
+        p.append(token)
+    return p.prettify()
+
+
+def classify_comment(comment):
+    """Classify the given comment and augment with additional information."""
+    result = toxicity_pipeline(comment)[0]
+
+    # Add explanation
+    result['word_attribution'] = cls_explainer(comment, class_name="non_toxic")
+    result['visualisation_html'] = cls_explainer.visualize()._repr_html_()
+    result['tokens_with_background'] = format_explainer_html(
+        result['visualisation_html'])
+
+    # Choose emoji reaction
+    label, score = result['label'], result['score']
+    if label == 'toxic' and score > 0.1:
+        emoji = random.choice(potty_mouth_emojis)
+    elif label == 'non_toxic' and score > 0.1:
+        emoji = random.choice(regular_emojis)
+    else:
+        emoji = random.choice(undecided_emojis)
+    result.update({'text': comment, 'emoji': emoji})
+
+    # Add result to session
+    st.session_state.results.append(result)
+
+
+# Start session
+if 'results' not in st.session_state:
+    st.session_state.results = []
+
+# Page
+st.title('🤬 German Toxic Comment Detection')
+st.markdown("""This demo showcases the German toxic comment detection model.""")
+
+# Introduction
+st.markdown(f"""The model was trained using a sequence classification task on a combination of multiple German datasets containing toxicity, profanity, and hate speech. For a more comprehensive overview of the model, check out the [model card on 🤗 Model Hub]({model_hub_url}).
+""")
+st.markdown("""Enter a comment that you want to classify below. The model will determine the probability that it is toxic and highlight how much each token contributes to its decision:
+<font color="black">
+    <span style="background-color: rgb(250, 219, 219); opacity: 1;">r</span><span style="background-color: rgb(244, 179, 179); opacity: 1;">e</span><span style="background-color: rgb(238, 135, 135); opacity: 1;">d</span>
+</font>
+tokens indicate toxicity whereas
+<font color="black">
+    <span style="background-color: rgb(224, 251, 224); opacity: 1;">g</span><span style="background-color: rgb(197, 247, 197); opacity: 1;">re</span><span style="background-color: rgb(121, 236, 121); opacity: 1;">en</span>
+</font> tokens indicate the opposite.
+
+Try it yourself! 👇""",
+    unsafe_allow_html=True)
+
+# Demo
+with st.form("german-toxic-comment-detection-input", clear_on_submit=True):
+    text = st.text_area(
+        label='Enter the comment you want to classify below (in German):')
+    _, rightmost_col = st.columns([6,1])
+    submitted = rightmost_col.form_submit_button("Classify",
+                                                 help="Classify comment")
+
+# Listener
+if submitted:
+    if text:
+        with st.spinner('Analysing comment...'):
+            classify_comment(text)
+    else:
+        st.error('**Error**: No comment to classify. Please provide a comment.')
+
+# Results
+if 'results' in st.session_state and st.session_state.results:
+    first = True
+    for result in st.session_state.results[::-1]:
+        if not first:
+            st.markdown("---")
+        st.markdown(f"Text:\n> {result['text']}")
+        col_1, col_2, col_3 = st.columns([1,2,2])
+        col_1.metric(label='', value=f"{result['emoji']}")
+        col_2.metric(label='Label', value=f"{result['label']}")
+        col_3.metric(label='Score', value=f"{result['score']:.3f}")
+        st.markdown(f"Token Attribution:\n{result['tokens_with_background']}",
+                    unsafe_allow_html=True)
+        first = False
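For anyone who wants to sanity-check the outputs that `app.py` consumes, here is a minimal sketch (not part of this commit) that exercises the same pipeline and explainer outside Streamlit. It assumes the packages pinned in `requirements.txt` are installed and the model can be downloaded from the Hub; the sample comment and the printed fields are illustrative only.

```python
# Minimal sketch (not part of this commit): run the classifier and explainer
# outside Streamlit to inspect the structures app.py relies on.
from transformers import pipeline
from transformers_interpret import SequenceClassificationExplainer

model_name = 'ml6team/distilbert-base-german-cased-toxic-comments'
toxicity_pipeline = pipeline('text-classification',
                             model=model_name,
                             tokenizer=model_name)
cls_explainer = SequenceClassificationExplainer(toxicity_pipeline.model,
                                                toxicity_pipeline.tokenizer)

comment = "Das ist ein Beispielkommentar."  # illustrative sample input
result = toxicity_pipeline(comment)[0]      # dict with 'label' and 'score'
attributions = cls_explainer(comment, class_name="non_toxic")

print(result['label'], f"{result['score']:.3f}")
print(attributions[:5])  # list of (token, attribution) tuples used for highlighting
```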
requirements.txt ADDED
@@ -0,0 +1,4 @@
+beautifulsoup4==4.10.0
+streamlit==1.0.0
+transformers==4.15.0
+transformers-interpret==0.5.2