import torch
from torch import nn
from transformers import AutoTokenizer, BertModel
###########################################################
review_text = "I love you"  # sample input text to classify
###########################################################
PRE_TRAINED_MODEL_NAME = 'nreimers/BERT-Tiny_L-2_H-128_A-2'
class_names = ["Normal", "Abusive"]
MAX_LEN = "max_length"  # padding strategy passed to the tokenizer, not a length
class CyberbullyingClassifier(nn.Module):
    """BERT-Tiny encoder with a linear classification head."""

    def __init__(self, n_classes):
        super(CyberbullyingClassifier, self).__init__()
        self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME).to("cpu")
        # self.drop = nn.Dropout(p=0.3)
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        bert_out = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        # bert_out[1] is the pooled [CLS] representation
        pooled_output = bert_out[1]
        # output = self.drop(pooled_output)
        return self.out(pooled_output)
tokenizer = AutoTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)

model = CyberbullyingClassifier(len(class_names))
model.load_state_dict(torch.load('./best_model_state.bin', map_location=torch.device('cpu')))
model.eval()  # inference mode: disables dropout
def classify(review_text):
    encoded_review = tokenizer(review_text, padding=MAX_LEN, truncation=True, return_tensors="pt")
    input_ids = encoded_review['input_ids'].to('cpu')
    attention_mask = encoded_review['attention_mask'].to('cpu')
    with torch.no_grad():  # no gradients needed for inference
        output = model(input_ids, attention_mask)
    _, prediction = torch.max(output, dim=1)
    prediction = prediction.item()  # tensor -> Python int for indexing class_names
    print(f'Review text: {review_text}')
    print(f'Prediction : {class_names[prediction]}')
    return class_names[prediction]
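

# A minimal usage sketch (not part of the original script): run the classifier
# on the sample text defined above. Assumes best_model_state.bin is present in
# the working directory so the state-dict load above succeeds.
if __name__ == "__main__":
    classify(review_text)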