Jayesh13 committed
Commit aabb9a4 · 1 Parent(s): 01d9dd2

Delete app.py

Files changed (1)
  1. app.py +0 -136
app.py DELETED
@@ -1,136 +0,0 @@
- # Importing necessary libraries
- import streamlit as st
- import os
- import numpy as np
- import pandas as pd
- import matplotlib.pyplot as plt
- import re
-
- st.title('Toxic Comment Classification')
- comment = st.text_area("Enter Your Text", "Type Here")
-
- comment_input = []
- comment_input.append(comment)
- test_df = pd.DataFrame()
- test_df['comment_text'] = comment_input
- cols = {'toxic':[0], 'severe_toxic':[0], 'obscene':[0], 'threat':[0], 'insult':[0], 'identity_hate':[0]}
- for key in cols.keys():
-     test_df[key] = cols[key]
- test_df = test_df.reset_index()
- test_df.drop(columns=["index"], inplace=True)
-
- # Data Cleaning and Preprocessing
- # Creating a copy of the data for cleaning and preprocessing
- cleaned_data = test_df.copy()
-
- # Removing hyperlinks from text
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"https?://\S+|www\.\S+", "", x))
-
- # Removing emojis from text
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub("["
-                                                                                  u"\U0001F600-\U0001F64F"
-                                                                                  u"\U0001F300-\U0001F5FF"
-                                                                                  u"\U0001F680-\U0001F6FF"
-                                                                                  u"\U0001F1E0-\U0001F1FF"
-                                                                                  u"\U00002702-\U000027B0"
-                                                                                  u"\U000024C2-\U0001F251"
-                                                                                  "]+", "", x, flags=re.UNICODE))
-
- # Removing IP addresses from text
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", "", x))
-
- # Removing HTML tags from text
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"<.*?>", "", x))
-
- # Some comments contain doubled quotation marks like ""words""; convert these to "words"
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"\"\"", "\"", x))  # replacing "" with "
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"^\"", "", x))  # removing quotation marks at the start and end of the string
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"\"$", "", x))
-
- # Replacing runs of two or more punctuation / special characters (e.g. ;:'".?@!%&*+) with a single space
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"[^a-zA-Z0-9\s][^a-zA-Z0-9\s]+", " ", x))
-
- # Removing any remaining special characters (keeping basic punctuation)
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"[^a-zA-Z0-9\s\"\',:;?!.()]", " ", x))
-
- # Collapsing extra whitespace in text
- cleaned_data["comment_text"] = cleaned_data["comment_text"].map(lambda x: re.sub(r"\s\s+", " ", x))
-
- Final_data = cleaned_data.copy()
-
- # Model Building
- from transformers import DistilBertTokenizer
- import torch
- import torch.nn as nn
- from torch.utils.data import DataLoader, Dataset
-
- # Using the pretrained DistilBertTokenizer
- tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
-
- # Creating a Dataset class for the toxic comments and their labels
- class Toxic_Dataset(Dataset):
-     def __init__(self, Comments_, Labels_):
-         self.comments = Comments_.copy()
-         self.labels = Labels_.copy()
-
-         self.comments["comment_text"] = self.comments["comment_text"].map(lambda x: tokenizer(x, padding="max_length", truncation=True, return_tensors="pt"))
-
-     def __len__(self):
-         return len(self.labels)
-
-     def __getitem__(self, idx):
-         comment = self.comments.loc[idx, "comment_text"]
-         label = np.array(self.labels.loc[idx, :])
-
-         return comment, label
-
- # Using the cleaned text for prediction
- X_test = pd.DataFrame(Final_data.iloc[:, 0])
- Y_test = Final_data.iloc[:, 1:]
- Test_data = Toxic_Dataset(X_test, Y_test)
- Test_Loader = DataLoader(Test_data, shuffle=False)
-
- # Loading the pretrained DistilBERT model for sequence classification and
- # replacing the classifier head with a 6-unit output, one per label.
- # DistilBERT
-
- from transformers import DistilBertForSequenceClassification
-
- Distil_bert = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
-
- Distil_bert.classifier = nn.Sequential(
-     nn.Linear(768, 6),
-     nn.Sigmoid()
- )
- # print(Distil_bert)
-
- # Instantiating the model and loading the fine-tuned weights.
- # torch.load() restores the full serialized model, replacing the freshly built one.
- model = Distil_bert
- model.to('cpu')
- model = torch.load('dsbert_toxic.pt', map_location=torch.device('cpu'))
-
- # Making Predictions
- for comments, labels in Test_Loader:
-     labels = labels.to('cpu')
-     labels = labels.float()
-     masks = comments['attention_mask'].squeeze(1).to('cpu')
-     input_ids = comments['input_ids'].squeeze(1).to('cpu')
-
-     output = model(input_ids, masks)
-     op = output.logits
-
-     res = []
-     for i in range(6):
-         res.append(op[0, i])
-     # print(res)
-
- preds = []
-
- for i in range(len(res)):
-     preds.append(res[i].tolist())
-
-
- if st.button('Classify'):
-     for i in range(len(res)):
-         st.write(f"{Y_test.columns[i]} : {preds[i]}\n")
-     st.success('These are the outputs')
-
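For reference, the deleted file wires together Streamlit input, regex cleaning, a Toxic_Dataset/DataLoader, and a DistilBERT model with a 6-way sigmoid head in one script. The sketch below condenses the same inference path into a standalone function. It is a minimal sketch, not the original app: the checkpoint name dsbert_toxic.pt and the regex patterns come from the file above, while the clean_comment/predict_toxicity helper names and the direct model call without a DataLoader are assumptions for illustration.

# Minimal sketch of the inference flow from the deleted app.py.
# Assumes the serialized model file 'dsbert_toxic.pt' from the original app is available;
# clean_comment() and predict_toxicity() are illustrative helpers, not part of the original file.
import re
import torch
from transformers import DistilBertTokenizer

LABELS = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = torch.load("dsbert_toxic.pt", map_location=torch.device("cpu"))  # full serialized model
model.eval()

def clean_comment(text: str) -> str:
    """Apply the core regex cleanup steps from the deleted app (emoji and quote handling omitted for brevity)."""
    text = re.sub(r"https?://\S+|www\.\S+", "", text)                # hyperlinks
    text = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", "", text)   # IP addresses
    text = re.sub(r"<.*?>", "", text)                                # HTML tags
    text = re.sub(r"[^a-zA-Z0-9\s][^a-zA-Z0-9\s]+", " ", text)       # runs of special characters
    text = re.sub(r"[^a-zA-Z0-9\s\"\',:;?!.()]", " ", text)          # remaining special characters
    return re.sub(r"\s\s+", " ", text).strip()                       # extra whitespace

def predict_toxicity(comment: str) -> dict:
    """Return a per-label score for one comment, mirroring the app's prediction loop."""
    encoded = tokenizer(clean_comment(comment), padding="max_length",
                        truncation=True, return_tensors="pt")
    with torch.no_grad():
        output = model(encoded["input_ids"], encoded["attention_mask"])
    scores = output.logits[0]  # the sigmoid head means these are already in [0, 1]
    return {label: scores[i].item() for i, label in enumerate(LABELS)}

# Example:
# print(predict_toxicity("You are a wonderful person"))

Because the checkpoint was saved as a whole model object rather than a state_dict, torch.load() only needs the transformers classes importable to reconstruct it on CPU.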