Teery committed on
Commit 62fb805
1 Parent(s): ebef398

Update pages/toxic.py

Files changed (1):
  1. pages/toxic.py +7 -7
pages/toxic.py CHANGED
@@ -1,15 +1,15 @@
 import streamlit as st
 import requests
 
-API_URL = "https://api-inference.huggingface.co/models/cointegrated/rubert-tiny-toxicity"
-headers = {"Authorization": f"Bearer {'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'}"}
+import streamlit as st
+import requests
+
+from transformers import pipeline
 
-def query(payload):
-    response = requests.post(API_URL, headers=headers, json=payload)
-    return response.json()
+pipe = pipeline("text-classification", model="cointegrated/rubert-tiny-toxicity")
 text = st.text_input("Введите комментарий")
 if text:
-    output = query(text)[0].get('label')
+    output = pipe(text)[0].get('label')
     if output == 'dangerous':
         st.markdown('<p style="color:red;">ОПАСНЫЙ КОММЕНТАРИЙ</p>', unsafe_allow_html=True)
     elif output == 'non-toxic':
@@ -19,4 +19,4 @@ if text:
     elif output == 'threat':
         st.markdown('<p style="color:red;">Угрожающий комментарий</p>', unsafe_allow_html=True)
     elif output == 'obscenity':
-        st.markdown('<p style="color:pink;">ууу, непристойности</p>', unsafe_allow_html=True)
+        st.markdown('<p style="color:pink;">ууу, непристойности</p>', unsafe_allow_html=True)
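For context on what the new call returns (not part of the commit; a minimal sketch assuming the standard transformers text-classification pipeline output, with the label names taken from the diff above):

```python
from transformers import pipeline

# Same toxicity classifier the commit switches to; downloads the model on first run.
pipe = pipeline("text-classification", model="cointegrated/rubert-tiny-toxicity")

# A text-classification pipeline returns one dict per input with the top label
# and its score, e.g. [{'label': 'non-toxic', 'score': 0.97}], which is why the
# app reads pipe(text)[0].get('label').
result = pipe("Пример комментария")  # "Example comment"
print(result[0]["label"], result[0]["score"])
```

Compared with the removed requests-based query(), this loads the model locally instead of calling the hosted Inference API, so the hardcoded hf_ token in the old headers line is no longer needed.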