import streamlit as st
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Map model output indices to emotion labels.
map_id = {
    0: "sadness",
    1: "anger",
    2: "love",
    3: "surprise",
    4: "fear",
    5: "joy",
}

# Fit the tokenizer on the training text so it matches the vocabulary used at training time.
train = pd.read_csv('train.csv')
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train.text)

# Load the trained emotion-detection model.
model = tf.keras.models.load_model('DETECTION.h5')


class Predict:
    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    def predict(self, txt):
        # Tokenize and pad the input to the sequence length the model expects.
        x = pad_sequences(self.tokenizer.texts_to_sequences([txt]), maxlen=30)
        x = self.model(x)
        # Pick the class with the highest predicted probability.
        x = int(np.argmax(x))
        return map_id[x]


predict = Predict(model, tokenizer)

# Streamlit UI
st.title("TONE DETECTION | BCS WINTER PROJECT")
st.write("Enter a sentence to analyze the text's tone:")
user_input = st.text_input("")

if user_input:
    result = predict.predict(user_input).upper()
    st.write("TONE :- ", result)
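
# Note: a minimal sketch of launching the app from the command line, assuming this
# script is saved as app.py (the filename is an assumption, not stated above):
#   streamlit run app.py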