devanshsrivastav commited on
Commit
3b1e528
1 Parent(s): 7224958

added sexism detection

Browse files
.github/workflows/main.yml CHANGED
@@ -1,6 +1,7 @@
1
  name: Sync to Hugging Face hub
2
  on:
3
  push:
 
4
  branches: [main]
5
 
6
  # to run this workflow manually from the Actions tab
 
1
  name: Sync to Hugging Face hub
2
  on:
3
  push:
4
+ pull_request:
5
  branches: [main]
6
 
7
  # to run this workflow manually from the Actions tab
README.md CHANGED
@@ -31,7 +31,8 @@ pip install -r requirements.txt
31
  ## Usage
32
 
33
  - Navigate to the root directory of the project.
34
- - Run the Streamlit app by typing `streamlit run EDxHuggingface.py` in the command line.
 
35
 
36
  - A web-based dashboard will open in your default browser.
37
  - Type or paste a text input in the text box provided.
 
31
  ## Usage
32
 
33
  - Navigate to the root directory of the project.
34
+ - Run the Streamlit app by typing `streamlit run app.py` or `python -m streamlit run app.py` in the command line.
35
+ - For GitHub Codespaces, run: `python -m streamlit run app.py --server.enableCORS false --server.enableXsrfProtection false`
36
 
37
  - A web-based dashboard will open in your default browser.
38
  - Type or paste a text input in the text box provided.
app.py CHANGED
@@ -13,6 +13,8 @@ HF_API_KEY = os.getenv("HF_API_KEY")
13
  # API_URL_ED = "https://api-inference.huggingface.co/models/j-hartmann/emotion-english-distilroberta-base" #alternate ED model(slow loading on first run)
14
  API_URL_ED = "https://api-inference.huggingface.co/models/bhadresh-savani/bert-base-go-emotion"
15
  API_URL_HS = "https://api-inference.huggingface.co/models/IMSyPP/hate_speech_en"
 
 
16
  headers = {"Authorization": f"Bearer {HF_API_KEY}"}
17
 
18
  st.set_page_config(
@@ -26,7 +28,8 @@ st.title("GoEmotions Dashboard - Analyzing Emotions in Text")
26
  def query(payload):
27
  response_ED = requests.request("POST", API_URL_ED, headers=headers, json=payload)
28
  response_HS = requests.request("POST", API_URL_HS, headers=headers, json=payload)
29
- return (json.loads(response_ED.content.decode("utf-8")),json.loads(response_HS.content.decode("utf-8")))
 
30
 
31
  # Define color map for each emotion category
32
  color_map = {
@@ -64,6 +67,7 @@ color_map = {
64
  # Labels for Hate Speech Classification
65
  label_hs = {"LABEL_0": "Acceptable", "LABEL_1": "Inappropriate", "LABEL_2": "Offensive", "LABEL_3": "Violent"}
66
 
 
67
  # Define default options
68
 
69
  default_options = [
@@ -100,18 +104,16 @@ if submit:
100
 
101
  # Call API and get predicted probabilities for each emotion category and hate speech classification
102
  payload = {"inputs": text_input, "options": {"wait_for_model": True, "use_cache": True}}
103
- response_ED, response_HS = query(payload)
104
  predicted_probabilities_ED = response_ED[0]
105
  predicted_probabilities_HS = response_HS[0]
 
106
 
107
- ED, _, HS = st.columns([5,2,3])
108
 
109
  with ED:
110
- # Sort the predicted probabilities in descending order
111
- sorted_probs_ED = sorted(predicted_probabilities_ED, key=lambda x: x['score'], reverse=True)
112
-
113
  # Get the top 4 emotion categories and their scores
114
- top_emotions = sorted_probs_ED[:4]
115
  top_scores = [e['score'] for e in top_emotions]
116
 
117
  # Normalize the scores so that they add up to 100%
@@ -160,6 +162,7 @@ if submit:
160
 
161
  with _:
162
  st.text("")
 
163
 
164
  with HS:
165
  # Display Hate Speech Classification
@@ -172,9 +175,25 @@ if submit:
172
  st.image(f"assets/{hate_detection}.jpg", width=200)
173
  st.text("")
174
  st.text("")
 
 
 
 
 
 
 
 
 
 
175
  st.text("")
 
 
176
  st.text("")
177
- st.markdown(f"#### The given text is {hate_detection}")
 
 
 
 
178
 
179
 
180
 
 
13
  # API_URL_ED = "https://api-inference.huggingface.co/models/j-hartmann/emotion-english-distilroberta-base" #alternate ED model(slow loading on first run)
14
  API_URL_ED = "https://api-inference.huggingface.co/models/bhadresh-savani/bert-base-go-emotion"
15
  API_URL_HS = "https://api-inference.huggingface.co/models/IMSyPP/hate_speech_en"
16
+ API_URL_SD = "https://api-inference.huggingface.co/models/NLP-LTU/bertweet-large-sexism-detector"
17
+
18
  headers = {"Authorization": f"Bearer {HF_API_KEY}"}
19
 
20
  st.set_page_config(
 
28
  def query(payload):
29
  response_ED = requests.request("POST", API_URL_ED, headers=headers, json=payload)
30
  response_HS = requests.request("POST", API_URL_HS, headers=headers, json=payload)
31
+ response_SD = requests.request("POST", API_URL_SD, headers=headers, json=payload)
32
+ return (json.loads(response_ED.content.decode("utf-8")),json.loads(response_HS.content.decode("utf-8")),json.loads(response_SD.content.decode("utf-8")))
33
 
34
  # Define color map for each emotion category
35
  color_map = {
 
67
  # Labels for Hate Speech Classification
68
  label_hs = {"LABEL_0": "Acceptable", "LABEL_1": "Inappropriate", "LABEL_2": "Offensive", "LABEL_3": "Violent"}
69
 
70
+
71
  # Define default options
72
 
73
  default_options = [
 
104
 
105
  # Call API and get predicted probabilities for each emotion category and hate speech classification
106
  payload = {"inputs": text_input, "options": {"wait_for_model": True, "use_cache": True}}
107
+ response_ED, response_HS, response_SD = query(payload)
108
  predicted_probabilities_ED = response_ED[0]
109
  predicted_probabilities_HS = response_HS[0]
110
+ predicted_probabilities_SD = response_SD[0]
111
 
112
+ ED, _, HS, __, SD = st.columns([4,1,2,1,2])
113
 
114
  with ED:
 
 
 
115
  # Get the top 4 emotion categories and their scores
116
+ top_emotions = predicted_probabilities_ED[:4]
117
  top_scores = [e['score'] for e in top_emotions]
118
 
119
  # Normalize the scores so that they add up to 100%
 
162
 
163
  with _:
164
  st.text("")
165
+
166
 
167
  with HS:
168
  # Display Hate Speech Classification
 
175
  st.image(f"assets/{hate_detection}.jpg", width=200)
176
  st.text("")
177
  st.text("")
178
+ st.markdown(f"#### The given text is: {hate_detection}")
179
+
180
+ with __:
181
+ st.text("")
182
+
183
+ with SD:
184
+ st.text("")
185
+ st.text("")
186
+ st.text("")
187
+ st.subheader("Sexism Detection")
188
  st.text("")
189
+ label_SD = predicted_probabilities_SD[0]['label'].capitalize()
190
+ st.image(f"assets/{label_SD}.jpg", width=200)
191
  st.text("")
192
+ st.text("")
193
+ st.markdown(f"#### The given text is: {label_SD}")
194
+
195
+
196
+
197
 
198
 
199
 
assets/Not Sexist.jpg ADDED
assets/Sexist.jpg ADDED