imperialwool committed
Commit c7dfb4e
1 Parent(s): 2204e85

Update app.py

Files changed (1)
  app.py +32 -1
app.py CHANGED
@@ -92,6 +92,8 @@ def getBMFull(): return osuApi.getFull(request)
###############
# LOAD MODELS
sa_t, sa_m = AutoTokenizer.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment"), AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment")
+ tc_t, tc_m = AutoTokenizer.from_pretrained("EIStakovskii/xlm_roberta_base_multilingual_toxicity_classifier_plus"), AutoModelForSequenceClassification.from_pretrained("EIStakovskii/xlm_roberta_base_multilingual_toxicity_classifier_plus")
+ chct_t, chct_m = AutoTokenizer.from_pretrained("cointegrated/rut5-small-chitchat"), AutoModelForSequenceClassification.from_pretrained("cointegrated/rut5-small-chitchat")

##############
# ANALYZE DATA API
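
Note on the second pair of models loaded above: cointegrated/rut5-small-chitchat is published as a T5-style encoder-decoder for generating replies, so wrapping it in AutoModelForSequenceClassification either raises an error or attaches a randomly initialized classification head, depending on the installed transformers version, and the chitchat route added below then decodes a class index rather than generated tokens. A minimal sketch of the usual seq2seq usage of that checkpoint, independent of this commit; the prompt and sampling settings are illustrative assumptions:

# Sketch only, not the committed code: the usual way to get a reply out of
# cointegrated/rut5-small-chitchat is as a seq2seq model with generate().
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

chct_t = AutoTokenizer.from_pretrained("cointegrated/rut5-small-chitchat")
chct_m = AutoModelForSeq2SeqLM.from_pretrained("cointegrated/rut5-small-chitchat")

inputs = chct_t("Привет! Как дела?", return_tensors="pt")
# Sampling settings are illustrative assumptions, not values from this repo.
output_ids = chct_m.generate(**inputs, max_new_tokens=32, do_sample=True, top_p=0.9)
print(chct_t.decode(output_ids[0], skip_special_tokens=True))
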
 
@@ -103,7 +105,7 @@ def sentimentAnalys():
        if text == "": return {"status": "error", "details": { "error_code": 101, "error_details": "No text provided" }}

        inputs = sa_t(text, return_tensors="pt")
-
+
        # Predict the sentiment of the text
        outputs = sa_m(**inputs)
        logits = outputs.logits
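
The hunk above stops before sentimentAnalys() turns the logits into a label; the toxicity handler added below follows the same pattern, taking an argmax over the logits and mapping it through the model's id2label. A standalone sketch of that flow outside Flask, assuming the sentiment handler does the same; the example input is arbitrary and the printed label string depends on the checkpoint's own id2label mapping:

# Standalone sketch of the sentiment path used by sentimentAnalys();
# the input text is arbitrary and the label string comes from the model config.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

sa_t = AutoTokenizer.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment")
sa_m = AutoModelForSequenceClassification.from_pretrained("cardiffnlp/twitter-xlm-roberta-base-sentiment")

inputs = sa_t("I really enjoyed this update", return_tensors="pt")
logits = sa_m(**inputs).logits
predicted_class = logits.argmax(dim=1).item()
print(sa_m.config.id2label[predicted_class])  # e.g. "positive", per the model's id2label
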
 
@@ -112,6 +114,35 @@ def sentimentAnalys():

        return {"status": "pass", "predicted_sentiment": predicted_sentiment}
    except Exception as e: return {"status": "error", "details": { "error_code": 123, "error_details": str(e).replace("\n", " | ") }}
+ @app.route('/analyzeText/api/v1/toxicity', methods=['GET', 'POST'])
+ def toxicityAnalys():
+     try:
+         text = request.form.get('text') or request.args.get('text') or request.values.get('text') or ""
+         if text == "": return {"status": "error", "details": { "error_code": 101, "error_details": "No text provided" }}
+
+         inputs = tc_t(text, return_tensors="pt")
+
+         # Predict the toxicity of the text
+         outputs = tc_m(**inputs)
+         logits = outputs.logits
+         predicted_class = logits.argmax(dim=1).item()
+         predicted_sentiment = True if str(tc_m.config.id2label[predicted_class]) == "LABEL_1" else False
+
+         return {"status": "pass", "toxicity": predicted_sentiment}
+     except Exception as e: return {"status": "error", "details": { "error_code": 123, "error_details": str(e).replace("\n", " | ") }}
+ @app.route('/analyzeText/api/v1/chitchat', methods=['GET', 'POST'])
+ def chitchatRu():
+     try:
+         text = request.form.get('text') or request.args.get('text') or request.values.get('text') or ""
+         if text == "": return {"status": "error", "details": { "error_code": 101, "error_details": "No text provided" }}
+
+         inputs = chct_t(text, padding=True, truncation=True, return_tensors="pt")
+         outputs = chct_m(**inputs)
+         predicted_class = outputs.logits.argmax(dim=1).item()
+         answer = chct_t.decode(predicted_class)
+
+         return {"status": "pass", "answer": answer}
+     except Exception as e: return {"status": "error", "details": { "error_code": 123, "error_details": str(e).replace("\n", " | ") }}

if __name__ == "__main__":
    config = configFile()
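
Both new routes read the text from form data, query parameters, or request values, and accept GET as well as POST. A hypothetical client call follows; the base URL is an assumption here, since the real host and port come from configFile() and are not part of this diff:

# Hypothetical client for the new endpoints; the base URL is an assumption,
# since the real host/port are taken from configFile().
import requests

base = "http://localhost:7860"

tox = requests.post(f"{base}/analyzeText/api/v1/toxicity", data={"text": "some text to check"})
print(tox.json())   # e.g. {'status': 'pass', 'toxicity': False}

chat = requests.post(f"{base}/analyzeText/api/v1/chitchat", data={"text": "Привет!"})
print(chat.json())  # {'status': 'pass', 'answer': ...}
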