import gradio as gr
import pickle
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import sklearn  # ensures scikit-learn is available for unpickling the vectorizer and model

# Tokenizer and stopword data used by the preprocessing step.
# (Newer NLTK releases may also need the 'punkt_tab' resource for word_tokenize.)
nltk.download('punkt')
nltk.download('stopwords')

ps = PorterStemmer()
def transform_text(text):
    """Lower-case, tokenize, drop non-alphanumeric tokens and stopwords, then stem."""
    text = text.lower()
    text = nltk.word_tokenize(text)

    # Keep only alphanumeric tokens (drops punctuation and symbols).
    y = []
    for i in text:
        if i.isalnum():
            y.append(i)

    # Remove English stopwords.
    text = y[:]
    y.clear()
    for i in text:
        if i not in stopwords.words('english') and i not in string.punctuation:
            y.append(i)

    # Stem each remaining token.
    text = y[:]
    y.clear()
    for i in text:
        y.append(ps.stem(i))

    return " ".join(y)
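# Illustrative example: a message such as "Congratulations!! You have WON a free
# prize, call now" comes out of transform_text as roughly
# "congratul won free prize call" (the exact tokens depend on the installed
# NLTK tokenizer, stopword list, and stemmer versions).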
# Load the fitted TF-IDF vectorizer and the trained classifier.
tfidf = pickle.load(open('vectorizer.pkl', 'rb'))
model = pickle.load(open('model.pkl', 'rb'))
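# The two pickles are assumed to come from a separate, offline training script.
# A minimal sketch of how they might have been produced (the dataset name, the
# column names, and the choice of MultinomialNB are assumptions, not part of
# this Space):
#
#     import pandas as pd
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     from sklearn.naive_bayes import MultinomialNB
#
#     df = pd.read_csv('spam.csv')                      # hypothetical labelled data
#     vectorizer = TfidfVectorizer(max_features=3000)
#     X = vectorizer.fit_transform(df['text'].apply(transform_text))
#     clf = MultinomialNB().fit(X, df['label'])         # label: 1 = spam, 0 = ham
#     pickle.dump(vectorizer, open('vectorizer.pkl', 'wb'))
#     pickle.dump(clf, open('model.pkl', 'wb'))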
def predict_spam(input_sms):
    # 1. Preprocess
    transformed_sms = transform_text(input_sms)
    # 2. Vectorize
    vector_input = tfidf.transform([transformed_sms])
    # 3. Predict
    result = model.predict(vector_input)[0]
    # 4. Display result
    return "Spam" if result == 1 else "Not Spam"
title = "Email/SMS Spam Classifier"
inputs = gr.Textbox(label="Enter the message")
outputs = gr.Textbox(label='Result')
interface = gr.Interface(fn=predict_spam, inputs=inputs, outputs=outputs, title=title)
interface.launch(share=True)  # share=True also serves the app through a temporary public Gradio link