import streamlit as st
import pandas as pd
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

# Load the pretrained toxicity classifier and its tokenizer.
model_name = "unitary/toxic-bert"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Results table: one row per tweet with its predicted label and that label's probability.
df = pd.DataFrame(columns=["Tweet", "Toxicity", "Probability"])

sample_tweets = [
    "Ask Sityush to clean up his behavior than issue me nonsensical warnings...",
    "be a man and lets discuss it-maybe over the phone?",
    "Don't look, come or think of comming back! Tosser.",
]

# Classify the tweets with a text-classification pipeline built on toxic-bert.
classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
results = classifier(sample_tweets)

# Tokenized batch for scoring the model directly (see the sketch below).
batch = tokenizer(sample_tweets, padding=True, truncation=True, max_length=512, return_tensors="pt")
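# Hedged sketch: `batch` is not used by the pipeline above, but it could be scored
# directly with the model. toxic-bert is a multi-label classifier, so an independent
# sigmoid per label (rather than a softmax over labels) is assumed here; `raw_probs`
# is an illustrative name and is not displayed in the app.
with torch.no_grad():
    logits = model(**batch).logits
raw_probs = torch.sigmoid(logits)  # shape: (num_tweets, num_labels)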

# Assignment 3: Streamlit UI showing the model name and the per-tweet predictions.
st.title("CS482 Project Sentiment Analysis")

st.markdown("**:red[unitary/toxic-bert]**")

# Fill the table with each tweet, its predicted label, and that label's probability.
for tweet, result in zip(sample_tweets, results):
    df.loc[len(df.index)] = [tweet, result["label"], result["score"]]

st.table(df)
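
# Usage note (assuming this file is saved as app.py): launch the app locally with
#   streamlit run app.py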