RealKintaro committed
Commit
092cded
1 Parent(s): 8339ff6
Files changed (1)
  1. Deployment/app.py +6 -6
Deployment/app.py CHANGED
@@ -171,7 +171,7 @@ if st.session_state['Loaded'] == False:
     # Offensiveness detection model

     offensive_model = BertClassifier()
-    offensive_model.load_state_dict(torch.load(os.path.join(parent_path,'models/modelv3.pt')))
+    offensive_model.load_state_dict(torch.load(os.path.join(parent_path,'models/modelv3.pt'), map_location=torch.device('cpu')))
     offensive_tokenizer = BertTokenizer.from_pretrained('aubmindlab/bert-base-arabertv02', do_lower_case=True)

     #send model to device
@@ -193,7 +193,7 @@ if st.session_state['Loaded'] == False:
     racism_model,religionhate_model,verbalabuse_model,misogyny_model = MediumBert(),MediumBert(),MediumBert(),MediumBert()
     ################################################################

-    racism_model.load_state_dict(torch.load(os.path.join(parent_path,'models/racism/racism_arabert.pt')))
+    racism_model.load_state_dict(torch.load(os.path.join(parent_path,'models/racism/racism_arabert.pt'), map_location=torch.device('cpu')))
     racism_dict = {0: 'non_racist', 1: 'racist'}

     racism_model = racism_model.to(device)
@@ -204,7 +204,7 @@ if st.session_state['Loaded'] == False:
     print('Racism model loaded')
     ################################################################

-    religionhate_model.load_state_dict(torch.load(os.path.join(parent_path,'models/religion_hate/religion_hate_params.pt')))
+    religionhate_model.load_state_dict(torch.load(os.path.join(parent_path,'models/religion_hate/religion_hate_params.pt'), map_location=torch.device('cpu')))
     religionhate_dict = {0: 'Religion Hate', 1: 'Not Religion Hate'}

     religionhate_model = religionhate_model.to(device)
@@ -215,7 +215,7 @@ if st.session_state['Loaded'] == False:
     print('Religion Hate model loaded')
     ################################################################

-    verbalabuse_model.load_state_dict(torch.load(os.path.join(parent_path,'models/verbal_abuse/verbal_abuse_arabert.pt')))
+    verbalabuse_model.load_state_dict(torch.load(os.path.join(parent_path,'models/verbal_abuse/verbal_abuse_arabert.pt'), map_location=torch.device('cpu')))
     verbalabuse_dict = {0: 'Verbal Abuse', 1: 'Not Verbal Abuse'}

     verbalabuse_model=verbalabuse_model.to(device)
@@ -226,7 +226,7 @@ if st.session_state['Loaded'] == False:
     print('Verbal Abuse model loaded')
     ################################################################

-    misogyny_model.load_state_dict(torch.load(os.path.join(parent_path,'models/misogyny/misogyny.pt')))
+    misogyny_model.load_state_dict(torch.load(os.path.join(parent_path,'models/misogyny/misogyny.pt'), map_location=torch.device('cpu')))
     misogyny_dict = {0: 'misogyny', 1: 'non_misogyny'}

     misogyny_model=misogyny_model.to(device)
@@ -241,7 +241,7 @@ if st.session_state['Loaded'] == False:
     # Dialect detection model

     dialect_model = Dialect_Detection(10)
-    dialect_model.load_state_dict(torch.load(os.path.join(parent_path,'models/dialect_classifier.pt')))
+    dialect_model.load_state_dict(torch.load(os.path.join(parent_path,'models/dialect_classifier.pt'), map_location=torch.device('cpu')))

     dialect_model = dialect_model.to(device)
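
All six edits apply the same change: each torch.load call gains map_location=torch.device('cpu'), so checkpoints that were saved on a CUDA machine can still be deserialized on a CPU-only host, after which the model is moved to whatever device is actually available. A minimal sketch of that pattern, with a hypothetical TinyClassifier standing in for the app's BertClassifier and a throwaway checkpoint path (both are illustrative, not the repo's actual code):

import torch
import torch.nn as nn

# Stand-in module for illustration; any nn.Module with a saved state_dict works.
class TinyClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(768, 2)

    def forward(self, x):
        return self.fc(x)

# Pretend a checkpoint already exists (here we create one so the sketch runs).
torch.save(TinyClassifier().state_dict(), 'checkpoint.pt')

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = TinyClassifier()
# map_location='cpu' remaps stored tensors onto the CPU while deserializing;
# without it, torch.load tries to restore them onto the (possibly missing)
# CUDA device they were saved from and raises an error on CPU-only hosts.
state_dict = torch.load('checkpoint.pt', map_location=torch.device('cpu'))
model.load_state_dict(state_dict)
model = model.to(device)  # move to the runtime device afterwards, as app.py does

Loading on CPU first and calling .to(device) afterwards keeps the code identical on GPU and CPU deployments, which is presumably why the commit touches every checkpoint-loading line the same way.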