Upload app.py
app.py
ADDED
@@ -0,0 +1,94 @@
import re
import pickle

import gradio as gr
import numpy as np
import inflect
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.preprocessing import LabelEncoder  # the pickled encoder is a LabelEncoder, so scikit-learn must be installed
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Fetch the NLTK data used during preprocessing (no-op if already present)
nltk.download('stopwords', quiet=True)
nltk.download('wordnet', quiet=True)

# Load the tokenizer, label encoder, and model
def load_resources():
    tokenizer = AutoTokenizer.from_pretrained('./transformer_tokenizer')
    with open('./label_encoder_tf.pickle', 'rb') as handle:
        encoder = pickle.load(handle)
    model = TFAutoModelForSequenceClassification.from_pretrained('./transformer_model')
    return tokenizer, encoder, model

tokenizer, encoder, model = load_resources()
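# The relative paths above assume the tokenizer files, the pickled label
# encoder, and the model weights were uploaded to the Space alongside app.py.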

# Preprocessing functions
def expand_contractions(text, contractions_dict):
    # Longest keys first, so "can't've" matches before its prefix "can't"
    pattern = '({})'.format('|'.join(sorted(contractions_dict.keys(), key=len, reverse=True)))
    contractions_pattern = re.compile(pattern, flags=re.IGNORECASE | re.DOTALL)
    def expand_match(contraction):
        match = contraction.group(0)
        # The text is already lowercased, so a direct lookup is safe
        return contractions_dict.get(match.lower(), match)
    expanded_text = contractions_pattern.sub(expand_match, text)
    return re.sub("'", "", expanded_text)

def convert_numbers_to_words(text):
    # Spell out standalone digit tokens, e.g. "5" -> "five"
    p = inflect.engine()
    words = text.split()
    return ' '.join([p.number_to_words(word) if word.isdigit() else word for word in words])

def preprocess_text(text):
    # Keys and values are all lowercase because the text is lowercased first
    contractions_dict = {
        "ain't": "am not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have", "'cause": "because",
        "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not",
        "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not",
        "haven't": "have not", "he'd": "he had", "he'd've": "he would have", "he'll": "he will", "he'll've": "he will have",
        "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is",
        "i'd": "i had", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have",
        "isn't": "is not", "it'd": "it had", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have",
        "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not",
        "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have",
        "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not",
        "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have",
        "she'd": "she had", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is",
        "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have",
        "so's": "so is", "that'd": "that had", "that'd've": "that would have", "that's": "that is", "there'd": "there had",
        "there'd've": "there would have", "there's": "there is", "they'd": "they had", "they'd've": "they would have",
        "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have",
        "wasn't": "was not", "we'd": "we had", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have",
        "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have",
        "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have",
        "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have",
        "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not",
        "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have",
        "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are",
        "y'all've": "you all have", "you'd": "you had", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have",
        "you're": "you are", "you've": "you have"
    }
    text = text.lower()
    text = expand_contractions(text, contractions_dict)
    text = convert_numbers_to_words(text)
    text = re.sub(r'[^\w\s]', '', text)  # strip punctuation
    stop_words = set(stopwords.words('english'))
    text = ' '.join([word for word in text.split() if word not in stop_words])
    lemmatizer = WordNetLemmatizer()
    text = ' '.join([lemmatizer.lemmatize(word) for word in text.split()])
    return text

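# A quick sanity trace through the pipeline (hypothetical input):
#   preprocess_text("I can't meet at 5")
#   lowercase + contractions -> "i cannot meet at 5"
#   numbers to words         -> "i cannot meet at five"
#   stopwords + lemmatizer   -> "cannot meet five"
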
# Define the prediction function
def predict_spam(text):
    preprocessed_text = preprocess_text(text)
    encoding = tokenizer(preprocessed_text, return_tensors='tf', truncation=True, padding=True)
    prediction = model(encoding).logits
    predicted_label = np.argmax(prediction, axis=1)
    decoded_label = encoder.inverse_transform(predicted_label)
    return decoded_label[0]
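# Example calls (hypothetical messages; the label strings returned depend on
# the classes stored in the pickled encoder):
#   predict_spam("You won a free prize! Call now")  -> e.g. "spam"
#   predict_spam("See you at lunch?")               -> e.g. "ham"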

# Create the Gradio interface (gr.inputs.Textbox was removed in Gradio 3.x;
# gr.Textbox is the current equivalent)
iface = gr.Interface(fn=predict_spam,
                     inputs=gr.Textbox(lines=2, placeholder="Enter SMS message here..."),
                     outputs="text",
                     title="SMS Spam Classification with Transformer Model",
                     description="Enter an SMS message to classify it as spam or ham.")

# Launch the interface
iface.launch()
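# Hugging Face Spaces runs this script directly; when testing locally,
# iface.launch(share=True) would also expose a temporary public link.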