import numpy as np
import streamlit as st
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Assumed maximum sequence length; set this to the value used during fine-tuning.
SEQ_LEN = 128

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
# Load the fine-tuned classifier weights saved in the current directory
model = TFAutoModelForSequenceClassification.from_pretrained('./')

text = st.text_area('Your StackOverflow Question')


def preprocess_predict(text):
    # Tokenize the question into padded/truncated input IDs as a TF tensor
    inputs = tokenizer.encode(
        text,
        add_special_tokens=True,
        padding='max_length',
        truncation=True,
        max_length=SEQ_LEN,
        return_token_type_ids=False,
        return_tensors='tf',
    )
    # Run the model and turn the logits into class probabilities
    preds = model.predict(inputs)
    probabilities = tf.nn.softmax(preds['logits'], axis=-1)
    # Pick the most likely class for each input row
    class_preds = np.argmax(probabilities, axis=1)
    return class_preds.tolist()


if text:
    out = preprocess_predict(text)
    st.json(out)
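
# Usage note: the script can be served locally with Streamlit's CLI, e.g.
# `streamlit run app.py` (the filename app.py is an assumption, not given above).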