wissamantoun committed
Commit c64d018
1 Parent(s): 3831b68
Files changed (3)
  1. app.py +2 -2
  2. backend/sa.py +7 -4
  3. backend/services.py +8 -2
app.py CHANGED
@@ -3,7 +3,7 @@ import streamlit as st
 
 from backend.utils import get_current_ram_usage
 
-# import backend.aragpt
+import backend.aragpt
 import backend.home
 import backend.processor
 import backend.sa
@@ -15,7 +15,7 @@ st.set_page_config(
 PAGES = {
     "Home": backend.home,
     "Arabic Text Preprocessor": backend.processor,
-    # "Arabic Language Generation": backend.aragpt,
+    "Arabic Language Generation": backend.aragpt,
     "Arabic Sentiment Analysis": backend.sa,
 }
 
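The PAGES dict in app.py is the registry of a multi-page Streamlit app: each label maps to a backend module exposing a write() entry point (visible in the def write() hunk header of backend/sa.py below). A minimal sketch of how such a registry is typically dispatched, assuming a sidebar radio selector, which is not part of the hunks shown above:

import streamlit as st

import backend.aragpt
import backend.home
import backend.processor
import backend.sa

PAGES = {
    "Home": backend.home,
    "Arabic Text Preprocessor": backend.processor,
    "Arabic Language Generation": backend.aragpt,
    "Arabic Sentiment Analysis": backend.sa,
}

# Assumed dispatch: pick a page in the sidebar and call its write() function.
page_name = st.sidebar.radio("Pages", list(PAGES.keys()))
PAGES[page_name].write()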
backend/sa.py CHANGED
@@ -23,9 +23,9 @@ def write():
     if st.checkbox("More info: "):
         st.markdown(
            """
-###Submission Description:
+### Submission Description:
 
-My submission is based on an ensemble of 5 models with varying preprocessing and classifier designs. All model variants are built over MARBERT [?], a BERT-based model pre-trained on 1B dialectal Arabic tweets.
+My submission is based on an ensemble of 5 models with varying preprocessing and classifier designs. All model variants are built over MARBERT [1], a BERT-based model pre-trained on 1B dialectal Arabic tweets.
 
 For preprocessing, all models shared the following steps:
 - Replacing user mentions with “USER” and links with “URL”
@@ -45,16 +45,19 @@ def write():
 Model I is a vanilla variant with only the preprocessing steps mentioned above applied. Model II enhances the emoji representation by replacing OOV emojis with ones that have a similar meaning, for example 💊 → 😷.
 We noticed the repetitive use of “السلام عليكم” and “ورحمة الله وبركاته” in neutral tweets, especially when users were directing questions to business accounts. This could confuse the classifier if it encountered these words in, for example, a negative tweet, so in Model III we removed variations of these phrases using fuzzy matching algorithms.
 
-In Model IV, we tried to help the model by appending a sarcasm label to the input. We first trained a separate MARBERT on the ArSarcasm [?] dataset, and then used it to label the training and test sets.
+In Model IV, we tried to help the model by appending a sarcasm label to the input. We first trained a separate MARBERT on the ArSarcasm [2] dataset, and then used it to label the training and test sets.
 
-Model V uses the vanilla preprocessing approach, but instead of a dense layer built on top of MARBERT, we follow the approach detailed by Safaya et al. [?], which uses a CNN-based classifier.
+Model V uses the vanilla preprocessing approach, but instead of a dense layer built on top of MARBERT, we follow the approach detailed by Safaya et al. [3], which uses a CNN-based classifier.
 
 For the final prediction, we first average the predictions of the 5 cross-validation folds (this is done for each model variant separately), then we average the results of the 5 model variants. We observed that the distribution of the predicted sentiment classes doesn’t quite match the true distribution, because the model prefers the neutral class over the positive class. To counter that, we apply what we call a Label-Weighted average: after the final averaging we rescale the scores with the weights 1.57, 0.98, and 0.93 for positive, neutral, and negative, respectively (the weights were determined empirically).
 
 1- https://aclanthology.org/2021.acl-long.551/
+
 2- https://github.com/iabufarha/ArSarcasm
+
 3- https://github.com/alisafaya/OffensEval2020
 
+
            """
        )
    input_text = st.text_input(
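The final-prediction paragraph above describes two averaging stages followed by a label-weighted rescaling with the weights 1.57, 0.98, and 0.93. A minimal sketch of that computation, assuming the per-fold probabilities are already available as arrays; the function and variable names are illustrative, not taken from the repository:

import numpy as np

def label_weighted_ensemble(cv_probs, weights=(1.57, 0.98, 0.93)):
    """cv_probs[v][k]: probabilities of fold k of model variant v,
    an array of shape (n_tweets, 3) ordered [positive, neutral, negative]."""
    # Stage 1: average the cross-validation folds of each model variant separately.
    per_variant = [np.mean(np.stack(folds), axis=0) for folds in cv_probs]
    # Stage 2: average across the model variants.
    ensemble = np.mean(np.stack(per_variant), axis=0)
    # Label-weighted rescaling to counter the bias toward the neutral class.
    rescaled = ensemble * np.array(weights)
    return rescaled.argmax(axis=1)  # 0 = positive, 1 = neutral, 2 = negative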
backend/services.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 from typing import List
-
+import logging
 import more_itertools
 import pandas as pd
 import requests
@@ -13,6 +13,7 @@ from .preprocess import ArabertPreprocessor
 from .sa_utils import *
 from .utils import download_models, softmax
 
+logger = logging.getLogger(__name__)
 # Taken and Modified from https://huggingface.co/spaces/flax-community/chef-transformer/blob/main/app.py
 class TextGeneration:
     def __init__(self):
@@ -244,6 +245,7 @@ class SentimentAnalyzer:
         return final_labels, final_scores
 
     def get_preds_from_a_model(self, texts: List[str], model_name):
+
         prep = self.processors[model_name]
 
         prep_texts = [prep.preprocess(x) for x in texts]
@@ -257,7 +259,7 @@ class SentimentAnalyzer:
         preds_df = pd.DataFrame([])
         for i in range(0, 5):
             preds = []
-            for s in tqdm(more_itertools.chunked(list(prep_texts), 128)):
+            for s in more_itertools.chunked(list(prep_texts), 128):
                 preds.extend(self.pipelines[model_name][i](s))
             preds_df[f"model_{i}"] = preds
 
@@ -295,6 +297,7 @@ class SentimentAnalyzer:
         return final_labels, final_scores, final_scores_list
 
     def predict(self, texts: List[str]):
+        logger.info(f"Predicting for: {texts}")
         (
             new_balanced_label,
             new_balanced_score,
@@ -348,4 +351,7 @@ class SentimentAnalyzer:
             softmax(np.array([pos_score, neu_score, neg_score])).tolist()
         )
 
+        logger.info(f"Result: {final_ensemble_prediction}")
+        logger.info(f"Score: {final_ensemble_score}")
+        logger.info(f"All Scores: {final_ensemble_all_score}")
         return final_ensemble_prediction, final_ensemble_score, final_ensemble_all_score
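
The services.py hunks replace the tqdm-wrapped loop with a plain chunked loop and add module-level logging around predict(). A self-contained sketch of that batching-plus-logging pattern with a transformers pipeline; the helper name and the commented checkpoint are placeholders, not the models the Space actually downloads:

import logging
from typing import List

import more_itertools
from transformers import pipeline

logger = logging.getLogger(__name__)

def predict_in_chunks(clf, texts: List[str], chunk_size: int = 128) -> List[dict]:
    """Run a transformers pipeline over fixed-size batches, as in get_preds_from_a_model."""
    logger.info(f"Predicting for {len(texts)} texts")
    preds: List[dict] = []
    # Feed the pipeline fixed-size chunks instead of one huge list.
    for chunk in more_itertools.chunked(texts, chunk_size):
        preds.extend(clf(list(chunk)))
    logger.info(f"Collected {len(preds)} predictions")
    return preds

# Usage (checkpoint name is a placeholder):
# clf = pipeline("text-classification", model="some-finetuned-marbert-checkpoint")
# results = predict_in_chunks(clf, ["جميل جدا", "سيء للغاية"])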