import nltk
from nltk import word_tokenize
from nltk import pos_tag
import joblib
import numpy as np

from train import feature_vector, pos_tags

# Load the trained model and the feature scaler saved by train.py
model = joblib.load('model.pkl')
scaler = joblib.load('scaler.pkl')

# Fetch the NLTK resources needed for tokenization and POS tagging
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('punkt_tab')


def predict(sentence):
    tokens = word_tokenize(sentence)
    sent_pos_tags = pos_tag(tokens)
    sent_features = []
    l = len(tokens)
    for idx, word in enumerate(tokens):
        current_tag = sent_pos_tags[idx][1]
        # Index of the tag in the known tag list from train.py; -1 for unseen tags
        current_idx = pos_tags.index(current_tag) if current_tag in pos_tags else -1
        # Neighbouring words, with an empty string at the sentence boundaries
        prev_word = tokens[idx - 1] if idx != 0 else ""
        next_word = tokens[idx + 1] if idx != l - 1 else ""
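        # A minimal sketch of the rest of the pipeline, assuming that
        # feature_vector() from train.py accepts (word, current_idx,
        # prev_word, next_word) — the exact signature is an assumption —
        # and that model and scaler expose the scikit-learn
        # predict/transform API.
        sent_features.append(feature_vector(word, current_idx, prev_word, next_word))

    # Scale the per-token feature matrix with the scaler fitted at training
    # time, then predict one label per token.
    scaled = scaler.transform(np.array(sent_features))
    predictions = model.predict(scaled)
    return list(zip(tokens, predictions))


if __name__ == '__main__':
    # Hypothetical example sentence to illustrate the call
    print(predict("The quick brown fox jumps over the lazy dog"))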