import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# Hugging Face checkpoints: an English sentiment model and a Spanish toxicity model
model_one = "distilbert-base-uncased-finetuned-sst-2-english"
model_two = "Newtral/xlm-r-finetuned-toxic-political-tweets-es"
def toxicRating(text, model_name):
    # Load the selected checkpoint and its tokenizer, then classify the text
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
    results = classifier(text)
    return results
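
# Illustrative output only (not part of the app flow): the pipeline returns a list
# with one dict per input, so toxicRating("great day", model_one) yields something
# like [{"label": "POSITIVE", "score": 0.99}]; label names depend on the checkpoint.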
def main():
    st.title("TOXIC TWEETS, \n TOXIC OR NOT?")
    st.header("Select Model")
    selection = st.radio("Models", ('Model 1', 'Model 2'))
    tweet = st.text_area("Enter Tweet: ")
    if tweet:
        if selection == 'Model 1':
            rating = toxicRating(tweet, model_one)
            st.write(f"Label: {rating[0]['label']} \n Score: {rating[0]['score']}")
        elif selection == 'Model 2':
            rating = toxicRating(tweet, model_two)
            st.write(f"Label: {rating[0]['label']} \n Score: {rating[0]['score']}")
    else:
        st.warning("Enter Tweet")
if __name__ == "__main__":
    main()
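
# To run locally (assuming this file is saved as app.py): streamlit run app.py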