import streamlit as st #for deployment
from tensorflow.keras.models import load_model
import cv2
import numpy as np
from PIL import Image
import joblib
from sklearn.feature_extraction.text import CountVectorizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
import os
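# Word-to-index mapping for the IMDB vocabulary, used to encode raw review text
# into the integer sequences the saved sentiment models expect.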
word_to_index=imdb.get_word_index()
st.title("Deep Learning App")
task=st.radio("**Choose your task:**",["Tumor Detection","Sentiment Classification"])
if task=="Tumor Detection":
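    # Tumor detection: classify an uploaded image with the saved CNN model.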
st.subheader("Tumor Detection")
def make_tumor_prediction(img,model):
if img is None:
st.warning("Please upload an image.")
return
if img is not None:
img_array = np.array(Image.open(img))
img_array = cv2.resize(img_array, (128, 128))
input_img = np.expand_dims(img_array, axis=0)
prediction = model.predict(input_img)
if prediction > 0.5:
st.success("Tumor Detected")
else:
st.success("No Tumor")
ct_path = os.path.join("Saved_Models", "cnn_tumor.h5")
model=load_model(ct_path)
img=st.file_uploader("Insert your image in .png format")
if st.button("**Make Prediction**"):
make_tumor_prediction(img, model)
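# Sentiment classification: pick a dataset and one of the saved models.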
elif task=="Sentiment Classification":
st.subheader("Sentiment Classification")
data=st.radio("**Choose your dataset**",['IMDB Movie Reviews','SMS Spam Detection'])
model=st.selectbox("**Select the model:**",("Perceptron","BackPropogation","DNN","RNN","LSTM"))
if data=="IMDB Movie Reviews" and model=="Perceptron":
pe_path = os.path.join("Saved_Models", "perceptron_imdb.joblib")
imdb_perceptron = joblib.load(pe_path)
def perceptron_predict_imdb(review, vectorizer, model):
review_bow = vectorizer.transform([review])
prediction = imdb_perceptron.predict(review_bow)
if prediction > 0.5 :
st.success("Positive sentiment")
else:
st.success("Negative sentiment")
review=st.text_input("Give your review here.")
ve_path = os.path.join("Saved_Models", "vectorizer_imdb.joblib")
vectorizer = joblib.load(ve_path)
if st.button("**Make Prediction**"):
perceptron_predict_imdb(review,vectorizer,imdb_perceptron)
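    # LSTM trained on IMDB reviews: the text is encoded with the IMDB word index
    # and padded to 500 tokens before prediction.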
elif data=="IMDB Movie Reviews" and model=="LSTM":
li_path = os.path.join("Saved_Models", "lstm_imdb.h5")
imdb_lstm=load_model(li_path)
def lstm_predict_imdb(review, model, max_review_length):
top_words=5000
# Convert the review to the IMDB dataset format
review_sequence = imdb.get_word_index()
review = [review_sequence[word] if word in review_sequence and review_sequence[word] < top_words else 0 for word in review.split()]
review = sequence.pad_sequences([review], maxlen=max_review_length)
prediction = model.predict(review)
if prediction>0.5:
st.write("Positive Sentiment")
else:
st.write("Negative Sentiment")
review=st.text_input("Enter your review here.")
if st.button("**Make Prediction**"):
lstm_predict_imdb(review,imdb_lstm,500)
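    # Simple RNN trained on IMDB reviews; the preprocessing mirrors the LSTM branch above.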
elif data=="IMDB Movie Reviews" and model=="RNN":
ri_path = os.path.join("Saved_Models", "rnn_imdb.h5")
imdb_rnn=load_model(ri_path)
def rnn_predict_imdb(review, model, max_review_length):
top_words=5000
# Convert the review to the IMDB dataset format
review_sequence = imdb.get_word_index()
review = [review_sequence[word] if word in review_sequence and review_sequence[word] < top_words else 0 for word in review.split()]
review = sequence.pad_sequences([review], maxlen=max_review_length)
prediction = model.predict(review)
if prediction>0.5:
st.write("Positive Sentiment")
else:
st.write("Negative Sentiment")
review=st.text_input("Enter your review here.")
if st.button("**Make Prediction**"):
rnn_predict_imdb(review,imdb_rnn,500)
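    # DNN trained on IMDB reviews: tokens come from the module-level word index
    # and are padded to 500 positions before prediction.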
elif data=="IMDB Movie Reviews" and model=="DNN":
di_path = os.path.join("Saved_Models", "dnn_imdb.h5")
imdb_dnn = load_model(di_path)
def dnn_predict_sentiment(review, model):
prediction = imdb_dnn.predict(review)
if prediction > 0.5 :
st.success("Positive sentiment")
else:
st.success("Negative sentiment")
review=st.text_input("Enter your review here.")
if st.button("**Make Prediction**"):
word_to_index = imdb.get_word_index()
max_review_length=500
new_review_tokens=[word_to_index.get(word, 0) for word in review.split()]
new_review_tokens = pad_sequences([new_review_tokens], maxlen=max_review_length)
dnn_predict_sentiment(new_review_tokens,imdb_dnn)
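    # Backpropagation model trained on IMDB reviews (loaded with joblib); the helper
    # returns a raw score that is thresholded at 0.5 below.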
elif data == "IMDB Movie Reviews" and model == "BackPropogation":
ba_path = os.path.join("Saved_Models", "backprop_imdb.joblib")
backprop=joblib.load(ba_path)
def sentiment_classification(new_review_text, model):
max_review_length = 500
new_review_tokens = [word_to_index.get(word, 0) for word in new_review_text.split()]
new_review_tokens = pad_sequences([new_review_tokens], maxlen=max_review_length)
prediction = model.predict(new_review_tokens)
# Extract the first element if prediction is a list
prediction = prediction[0] if isinstance(prediction, list) else prediction
# Convert the prediction to a float (assuming it's a numeric value)
prediction = float(prediction) if prediction is not None else None
return prediction
review = st.text_input("Enter your review here.")
if st.button("Make Prediction"):
prediction = sentiment_classification(review, backprop)
# Display the result
if prediction > 0.5:
st.success("Positive Sentiment")
else:
st.success("Negative Sentiment")
elif data=="SMS Spam Detection" and model=="RNN":
rs_path = os.path.join("Saved_Models", "rnn_spam")
spam_rnn=load_model(rs_path)
def rnn_predict_spam(review,model):
tokeniser = tf.keras.preprocessing.text.Tokenizer()
tokeniser.fit_on_texts(review)
encoded_text = tokeniser.texts_to_sequences(review)
padded_text = tf.keras.preprocessing.sequence.pad_sequences(encoded_text, maxlen=10, padding='post')
predictions = (model.predict(padded_text) > 0.5).astype("int32")
if any(predictions >0.5):
st.write("Positive Sentiment")
else:
st.write("Negative Sentiment")
review=st.text_input("Enter your review here.")
if st.button("**Make Predictions**"):
rnn_predict_spam(review,spam_rnn)