import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import torch
from transformers import AutoTokenizer
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from transformers import AutoModel, AdamW, get_cosine_schedule_with_warmup
import torch.nn as nn
import math
from torchmetrics.functional.classification import auroc
import torch.nn.functional as F
import streamlit as st
from transformers import pipeline
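# Dataset for a Jigsaw-style toxicity CSV: each row has a comment_text column
# and one binary column per attribute; comments are tokenized on access.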
class toxicity_dataset(Dataset):
def __init__(self,data_path,tokenizer,attributes,max_token_len= 128,sample = 1000):
self.data_path=data_path
self.tokenizer=tokenizer
self.attributes=attributes
self.max_token_len=max_token_len
self.sample=sample
self._prepare_data()
def _prepare_data(self):
data=pd.read_csv(self.data_path)
if self.sample is not None:
self.data=data.sample(self.sample,random_state=7)
else:
self.data=data
def __len__(self):
return(len(self.data))
def __getitem__(self,index):
item = self.data.iloc[index]
comment = str(item.comment_text)
attributes = torch.FloatTensor(item[self.attributes])
        tokens = self.tokenizer.encode_plus(comment, add_special_tokens=True, return_tensors="pt",
                                            truncation=True, max_length=self.max_token_len,
                                            padding="max_length", return_attention_mask=True)
        return {"input_ids": tokens.input_ids.flatten(), "attention_mask": tokens.attention_mask.flatten(), "labels": attributes}
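# LightningDataModule that builds the train/test toxicity datasets and wraps
# them in DataLoaders; the tokenizer is created once from model_name.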
class Toxcity_Data_Module(pl.LightningDataModule):
def __init__(self,train_path,test_path,attributes,batch_size = 16, max_token_len = 128, model_name="roberta-base"):
super().__init__()
self.train_path=train_path
self.test_path=test_path
self.attributes=attributes
self.batch_size=batch_size
self.max_token_len=max_token_len
self.model_name=model_name
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
def setup(self, stage = None):
if stage in (None, "fit"):
self.train_dataset=toxicity_dataset(self.train_path,self.tokenizer,self.attributes)
self.test_dataset=toxicity_dataset(self.test_path,self.tokenizer,self.attributes, sample=None)
if stage == "predict":
self.val_dataset=toxicity_dataset(self.test_path,self.tokenizer,self.attributes)
def train_dataloader(self):
return DataLoader(self.train_dataset,batch_size=self.batch_size,shuffle=True)
    def val_dataloader(self):
        # validate on the held-out test split rather than reusing the training data
        return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)
def predict_dataloader(self):
return DataLoader(self.test_dataset,batch_size=self.batch_size,shuffle=False)
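# LightningModule: a pretrained transformer encoder followed by a hidden linear
# layer and a multi-label head trained with BCEWithLogitsLoss (one output per attribute).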
class Toxic_Comment_Classifier(pl.LightningModule):
def __init__(self, config: dict):
super().__init__()
self.config = config
self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict = True)
self.hidden = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.pretrained_model.config.hidden_size)
self.classifier = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.config['n_labels'])
torch.nn.init.xavier_uniform_(self.classifier.weight)
self.loss_func = nn.BCEWithLogitsLoss(reduction='mean')
self.dropout = nn.Dropout()
def forward(self, input_ids, attention_mask=None, labels=None):
# roberta layer
output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)
pooled_output = torch.mean(output.last_hidden_state, 1)
# final logits
pooled_output = self.dropout(pooled_output)
pooled_output = self.hidden(pooled_output)
pooled_output = F.relu(pooled_output)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
# calculate loss
loss = 0
if labels is not None:
loss = self.loss_func(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))
return loss, logits
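    # Lightning step hooks: log the loss for training/validation and return the
    # raw logits from predict_step for post-processing outside the trainer.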
def training_step(self, batch, batch_index):
loss, outputs = self(**batch)
self.log("train loss ", loss, prog_bar = True, logger=True)
return {"loss":loss, "predictions":outputs, "labels": batch["labels"]}
def validation_step(self, batch, batch_index):
loss, outputs = self(**batch)
self.log("validation loss ", loss, prog_bar = True, logger=True)
return {"val_loss": loss, "predictions":outputs, "labels": batch["labels"]}
def predict_step(self, batch, batch_index):
loss, outputs = self(**batch)
return outputs
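    # AdamW with a cosine learning-rate schedule; warmup is the configured
    # fraction of the total number of training steps.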
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['w_decay'])
        total_steps = math.ceil(self.config['train_size'] / self.config['bs'])
        warmup_steps = math.floor(total_steps * self.config['warmup'])
        scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
return [optimizer],[scheduler]
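# Runs trainer.predict over the data module and returns a (num_comments, num_labels)
# numpy array of per-label sigmoid probabilities.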
def predict_raw_comments(model, dm, trainer):
    predictions = trainer.predict(model, dm)
    # flatten the per-batch logits and convert them to per-label probabilities
    flattened_predictions = np.stack([torch.sigmoid(p).cpu().numpy() for batch in predictions for p in batch])
    return flattened_predictions
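# Streamlit entry point: build the data module, load the saved checkpoint, run
# prediction over the reduced test set, and show the most likely label per comment.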
def main():
# -- Creates Variables for Use of Model --
attributes=["toxic","severe_toxic","obscene","threat","insult","identity_hate"]
tokenizer=AutoTokenizer.from_pretrained("roberta-base")
toxic_comments_dataset=toxicity_dataset("AppDirectory/data/train.csv",tokenizer,attributes)
toxicity_data_module=Toxcity_Data_Module("AppDirectory/data/train.csv","AppDirectory/data/test.csv",attributes)
toxicity_data_module.setup()
dataloader=toxicity_data_module.train_dataloader()
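    # Hyperparameters for the classifier; train_size is the number of batches
    # in the sampled training dataloader.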
config = {
'model_name':"distilroberta-base",
'n_labels':len(attributes),
'bs':128,
'lr':1.5e-6,
'warmup':0.2,
"train_size":len(toxicity_data_module.train_dataloader()),
'w_decay':0.001,
'n_epochs':1
}
toxicity_data_module=Toxcity_Data_Module("AppDirectory/data/train.csv","AppDirectory/data/reduced_test.csv",attributes,batch_size=config['bs'])
toxicity_data_module.setup()
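    # The Trainer is only used for prediction here; the checkpoint loaded below
    # was trained separately.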
trainer = pl.Trainer(max_epochs=config['n_epochs'],num_sanity_val_steps=50)
## -- Creates Streamlit App --
st.title("Tweet Toxicity Classifier ")
st.header("Fine tuned model from roberta-base using PyTorch")
st.header("Jozef Janosko - CS 482, Milestone 3")
model_name = st.selectbox("Select Model...", ["Toxicity Classification Model"])
if st.button("Click to Load Data"):
if model_name=="Toxicity Classification Model":
model = torch.load("ToxicityClassificationModel.pt")
with st.spinner('Analyzing Text...'):
logits = predict_raw_comments(model,toxicity_data_module,trainer=trainer)
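                    # predict_raw_comments already applies a per-label sigmoid; the softmax
                    # below renormalizes the six scores per comment before picking a label.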
torch_logits = torch.from_numpy(logits)
probabilities = F.softmax(torch_logits, dim = -1).numpy()
inputs=pd.read_csv("AppDirectory/data/reduced_test.csv")
data=[]
#print(inputs["comment_text"][0]," ",probabilities)
for i in range(len(probabilities)):
max_prob = 0
max_cat = 6
prob=0
for j in range(6):
prob=probabilities[i][j]
if(prob >= max_prob):
max_prob = prob
max_cat = j
#print(inputs["comment_text"][i]," ",attributes[max_cat]," ",max_prob," ",probabilities[i])
data.append([inputs["comment_text"][i][0:16]+"...",attributes[max_cat],max_prob])
results_df=pd.DataFrame(data,columns=["Comment Text","Most Likely Classification","Classification Probability"])
st.table(data=results_df)
if __name__ == '__main__':
main()