JKJanosko committed
Commit
c206587
1 Parent(s): 27b305f

Add files via upload

Files changed (2)
  1. app.py +188 -0
  2. requirements.txt +0 -0
app.py ADDED
@@ -0,0 +1,188 @@
+ import math
+
+ import numpy as np
+ import pandas as pd
+ import pytorch_lightning as pl
+ import streamlit as st
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ # AdamW lives in torch.optim; the copy once re-exported by transformers is deprecated.
+ from torch.optim import AdamW
+ from torch.utils.data import DataLoader, Dataset
+ from transformers import AutoModel, AutoTokenizer, get_cosine_schedule_with_warmup
+
+
+ class toxicity_dataset(Dataset):
+     """Wraps the toxic-comment CSV as a PyTorch Dataset of tokenized comments."""
+     def __init__(self, data_path, tokenizer, attributes, max_token_len=128, sample=1000):
+         self.data_path = data_path
+         self.tokenizer = tokenizer
+         self.attributes = attributes
+         self.max_token_len = max_token_len
+         self.sample = sample
+         self._prepare_data()
+
+     def _prepare_data(self):
+         data = pd.read_csv(self.data_path)
+         # Optionally subsample for faster experimentation; sample=None keeps the full file.
+         if self.sample is not None:
+             self.data = data.sample(self.sample, random_state=7)
+         else:
+             self.data = data
+
+     def __len__(self):
+         return len(self.data)
+
+     def __getitem__(self, index):
+         item = self.data.iloc[index]
+         comment = str(item.comment_text)
+         attributes = torch.FloatTensor(item[self.attributes])
+         tokens = self.tokenizer.encode_plus(
+             comment,
+             add_special_tokens=True,
+             return_tensors="pt",
+             truncation=True,
+             max_length=self.max_token_len,
+             padding="max_length",
+             return_attention_mask=True,
+         )
+         return {
+             "input_ids": tokens.input_ids.flatten(),
+             "attention_mask": tokens.attention_mask.flatten(),
+             "labels": attributes,
+         }
+
+
+ class Toxcity_Data_Module(pl.LightningDataModule):
+     def __init__(self, train_path, test_path, attributes, batch_size=16, max_token_len=128, model_name="roberta-base"):
+         super().__init__()
+         self.train_path = train_path
+         self.test_path = test_path
+         self.attributes = attributes
+         self.batch_size = batch_size
+         self.max_token_len = max_token_len
+         self.model_name = model_name
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     def setup(self, stage=None):
+         if stage in (None, "fit"):
+             self.train_dataset = toxicity_dataset(self.train_path, self.tokenizer, self.attributes)
+             self.test_dataset = toxicity_dataset(self.test_path, self.tokenizer, self.attributes, sample=None)
+         if stage == "predict":
+             self.val_dataset = toxicity_dataset(self.test_path, self.tokenizer, self.attributes)
+
+     def train_dataloader(self):
+         return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)
+
+     def val_dataloader(self):
+         # Validate on the held-out split, not the training data.
+         return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)
+
+     def predict_dataloader(self):
+         return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)
+
+
+ class Toxic_Comment_Classifier(pl.LightningModule):
+     def __init__(self, config: dict):
+         super().__init__()
+         self.config = config
+         self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict=True)
+         hidden_size = self.pretrained_model.config.hidden_size
+         self.hidden = nn.Linear(hidden_size, hidden_size)
+         self.classifier = nn.Linear(hidden_size, self.config['n_labels'])
+         nn.init.xavier_uniform_(self.classifier.weight)
+         # Multi-label setup: an independent sigmoid/BCE term per attribute.
+         self.loss_func = nn.BCEWithLogitsLoss(reduction='mean')
+         self.dropout = nn.Dropout()
+
+     def forward(self, input_ids, attention_mask=None, labels=None):
+         # RoBERTa encoder, mean-pooled over the sequence dimension
+         output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)
+         pooled_output = torch.mean(output.last_hidden_state, 1)
+         # classification head producing the final logits
+         pooled_output = self.dropout(pooled_output)
+         pooled_output = self.hidden(pooled_output)
+         pooled_output = F.relu(pooled_output)
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+         # loss is only computed when labels are provided (training/validation)
+         loss = 0
+         if labels is not None:
+             loss = self.loss_func(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))
+         return loss, logits
+
+     def training_step(self, batch, batch_index):
+         loss, outputs = self(**batch)
+         self.log("train_loss", loss, prog_bar=True, logger=True)
+         return {"loss": loss, "predictions": outputs, "labels": batch["labels"]}
+
+     def validation_step(self, batch, batch_index):
+         loss, outputs = self(**batch)
+         self.log("val_loss", loss, prog_bar=True, logger=True)
+         return {"val_loss": loss, "predictions": outputs, "labels": batch["labels"]}
+
+     def predict_step(self, batch, batch_index):
+         _, outputs = self(**batch)
+         return outputs
+
+     def configure_optimizers(self):
+         optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['w_decay'])
+         # 'train_size' is len(train_dataloader), i.e. optimizer steps per epoch.
+         total_steps = self.config['train_size'] * self.config['n_epochs']
+         warmup_steps = math.floor(total_steps * self.config['warmup'])
+         scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
+         # Step the schedule every batch so it spans exactly total_steps.
+         return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
+
+
+ def predict_raw_comments(model, dm, trainer):
+     # Run inference over the data module's predict dataloader; the result is a
+     # list of per-batch logit tensors.
+     predictions = trainer.predict(model, dm)
+     # Sigmoid converts multi-label logits into independent per-attribute probabilities.
+     flattened_predictions = np.stack([torch.sigmoid(p).numpy() for batch in predictions for p in batch])
+     return flattened_predictions
+
+
+ def main():
+     # -- Creates Variables for Use of Model --
+     attributes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
+
+     config = {
+         'model_name': "distilroberta-base",
+         'n_labels': len(attributes),
+         'bs': 128,
+         'lr': 1.5e-6,
+         'warmup': 0.2,
+         'w_decay': 0.001,
+         'n_epochs': 1,
+     }
+
+     toxicity_data_module = Toxcity_Data_Module("data/train.csv", "data/reduced_test.csv", attributes, batch_size=config['bs'])
+     toxicity_data_module.setup()
+     # Steps per epoch for the LR schedule; the dataloader length already reflects the batch size.
+     config['train_size'] = len(toxicity_data_module.train_dataloader())
+
+     trainer = pl.Trainer(max_epochs=config['n_epochs'], num_sanity_val_steps=50)
+
+     ## -- Creates Streamlit App --
+     st.title("Tweet Toxicity Classifier")
+     st.header("Fine-tuned model from roberta-base using PyTorch")
+     st.header("Jozef Janosko - CS 482, Milestone 3")
+
+     model_name = st.selectbox("Select Model...", ["Toxicity Classification Model"])
+
+     if st.button("Click to Load Data"):
+         if model_name == "Toxicity Classification Model":
+             # Map to CPU in case the checkpoint was saved on a GPU machine.
+             model = torch.load("ToxicityClassificationModel.pt", map_location=torch.device("cpu"))
+             with st.spinner('Analyzing Text...'):
+                 # predict_raw_comments applies a sigmoid, so these are already
+                 # per-attribute probabilities for the multi-label head.
+                 probabilities = predict_raw_comments(model, toxicity_data_module, trainer=trainer)
+                 inputs = pd.read_csv("data/reduced_test.csv")
+                 data = []
+                 for i in range(len(probabilities)):
+                     # Report the single most likely attribute for each comment.
+                     max_cat = int(np.argmax(probabilities[i]))
+                     max_prob = probabilities[i][max_cat]
+                     data.append([inputs["comment_text"][i][0:16] + "...", attributes[max_cat], max_prob])
+                 results_df = pd.DataFrame(data, columns=["Comment Text", "Most Likely Classification", "Classification Probability"])
+                 st.table(data=results_df)
+
+
+ if __name__ == '__main__':
+     main()
requirements.txt ADDED
Binary file (112 Bytes).