history_mistery/pages/✨second.py
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import re
import string
import pickle
import streamlit as st
# Text cleaning function
def clean(text):
    text = text.lower()                    # lowercase
    text = re.sub(r'http\S+', ' ', text)   # remove links
    text = re.sub(r'@\w+', ' ', text)      # remove user mentions
    text = re.sub(r'#\w+', ' ', text)      # remove hashtags
    text = re.sub(r'\d+', ' ', text)       # remove numbers
    return text
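# Illustrative example (not from the original source): clean("Check out https://t.co/x @user #fun 2024")
# keeps only "check out" plus whitespace, since the link, mention, hashtag, and digits are stripped.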
# Load the trained model weights
model_filename = 'model_comments_weights.pkl'
with open(model_filename, 'rb') as file:
    model = pickle.load(file)
# Load the fitted vectorizer weights
vectorizer_filename = 'vectorizer_comments_weights.pkl'
with open(vectorizer_filename, 'rb') as file:
    vectorizer = pickle.load(file)
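# Note: the two .pkl files above are assumed to have been produced offline by a
# training script along these lines (hypothetical sketch, names are illustrative):
#
#     train_texts = [clean(t) for t in raw_comments]          # raw_comments: list of strings
#     vec = CountVectorizer()
#     X = vec.fit_transform(train_texts)
#     clf = LogisticRegression(max_iter=1000).fit(X, labels)  # labels: 0 = non-toxic, 1 = toxic
#     with open('model_comments_weights.pkl', 'wb') as f:
#         pickle.dump(clf, f)
#     with open('vectorizer_comments_weights.pkl', 'wb') as f:
#         pickle.dump(vec, f)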
# The Streamlit app itself
st.title("SafeTalk")
st.write("Your Personal Comment Filter is an innovative application that harnesses the power of AI to distinguish toxic comments from the rest.")
st.write("Empowering users to navigate online discussions with confidence, SafeTalk ensures a more constructive and respectful online community by identifying and flagging harmful content.")
user_review = st.text_input("Enter your comment:", "")
user_review_clean = clean(user_review)
user_features = vectorizer.transform([user_review_clean])
prediction = model.predict(user_features)
st.write("Comment:", user_review)
if prediction[0] == 0:
    st.markdown("<p style='color: green;'>Non-toxic comment</p>", unsafe_allow_html=True)
else:
    st.markdown("<p style='color: red;'>Toxic comment</p>", unsafe_allow_html=True)