|
import pickle |
|
import joblib |
|
import numpy as np |
|
import tensorflow as tf |
|
from keras.utils import pad_sequences |
|
from keras.preprocessing.text import Tokenizer |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Model and tokenizer setup (runs once at import time) ---

# Use forward slashes so the path literal contains no invalid escape
# sequences (the old '...\models\model.h5' form triggers a SyntaxWarning
# on modern Python); Windows accepts '/' in paths.
model = tf.keras.models.load_model('F:/CVFilter/models/model.h5')

# Pre-tokenized corpus used to fit the Tokenizer's vocabulary.
tokenfile = 'F:/CVFilter/tokenized_words/tokenized_words.pkl'

# NOTE(security): pickle.load executes arbitrary code if the file is
# untrusted — only load pickles produced by this project.
with open(tokenfile, 'rb') as file:
    loaded_tokenized_words = pickle.load(file)

# Sequences are padded/truncated to this length before prediction.
max_review_length = 200

# Raw string keeps the literal backslash in the filter set explicit
# (the filter strips these punctuation characters during tokenization).
tokenizer = Tokenizer(num_words=10000,
                      filters=r'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~',
                      lower=True
                      )
tokenizer.fit_on_texts(loaded_tokenized_words)

# Class index -> human-readable job category, in the order the model
# was trained to emit them.
outcome_labels = ['Business Analyst', 'Cyber Security', 'Data Engineer', 'Data Science', 'DevOps', 'Machine Learning Engineer', 'Mobile App Developer', 'Network Engineer', 'Quality Assurance', 'Software Engineer']
|
|
|
def model_prediction(text, model=model, tokenizer=tokenizer, labels=outcome_labels):
    """Classify *text* into one of the job-category labels.

    The text is tokenized with the module-level tokenizer, padded to
    ``max_review_length``, and fed through the loaded Keras model; the
    label with the highest predicted score is returned.

    Args:
        text: Raw input string to classify.
        model: Keras model used for prediction (defaults to the one
            loaded at module import).
        tokenizer: Fitted tokenizer converting text to integer ids.
        labels: Ordered list of category names matching model outputs.

    Returns:
        The label string corresponding to the highest-scoring class.
    """
    token_ids = tokenizer.texts_to_sequences([text])
    features = pad_sequences(token_ids, maxlen=max_review_length)
    scores = model.predict(features)
    best_class = np.argmax(scores)
    return labels[best_class]