|
import gradio as gr |
|
import tensorflow as tf |
|
|
|
def split_char(text):
    """Return *text* with a single space inserted between every character.

    Used to build the character-level input the SkimLit model expects
    (e.g. "abc" -> "a b c").
    """
    # str.join iterates the string directly; no need to listify first.
    return " ".join(text)
|
|
|
from spacy.lang.en import English |
|
|
|
def make_predictions(Input):
    """Classify each sentence of a medical abstract into a SkimLit section.

    Splits *Input* into sentences with spaCy's rule-based sentencizer, builds
    the three model inputs (line-number one-hot, total-lines one-hot, raw
    sentence text plus character-level text), runs the tribrid USE model, and
    returns one "LABEL: sentence" line per sentence.

    Parameters
    ----------
    Input : str
        The abstract text entered in the Gradio textbox. (The parameter name
        is part of the Gradio-facing interface; do not rename.)

    Returns
    -------
    str
        Newline-separated "CLASS: sentence" lines; empty string for empty input.
    """
    class_names = ['BACKGROUND', 'CONCLUSIONS', 'METHODS', 'OBJECTIVE', 'RESULTS']

    # Cache the heavyweight resources on the function object so they are
    # built/loaded once, not on every Gradio request. Reloading the Keras
    # model from disk per call was the dominant cost of the original code.
    if not hasattr(make_predictions, "_nlp"):
        nlp = English()
        nlp.add_pipe('sentencizer')
        make_predictions._nlp = nlp
    if not hasattr(make_predictions, "_model"):
        make_predictions._model = tf.keras.models.load_model(
            "/content/drive/MyDrive/skimlit_models/Universal_sentence_encoder_Tribrid_embedding_model"
        )

    # Sentence-split the abstract.
    doc = make_predictions._nlp(Input)
    abstract_sentences = [sent.text for sent in doc.sents]

    # Guard: an empty abstract would break tf.one_hot / predict downstream.
    if not abstract_sentences:
        return ""

    n_sentences = len(abstract_sentences)

    # Positional features: the model was trained with line numbers one-hot
    # encoded to depth 15 and total-lines (zero-based) to depth 20.
    abstract_line_number_one_hot = tf.one_hot(list(range(n_sentences)), depth=15)
    abstract_total_lines_one_hot = tf.one_hot(
        [n_sentences - 1] * n_sentences, depth=20
    )

    # Character-level representation of each sentence ("abc" -> "a b c").
    abstract_char = [split_char(sentence) for sentence in abstract_sentences]

    # Predict and map each sentence's argmax class index to its label.
    abstract_pred_probs = make_predictions._model.predict(
        x=(abstract_line_number_one_hot,
           abstract_total_lines_one_hot,
           tf.constant(abstract_sentences),
           tf.constant(abstract_char))
    )
    abstract_preds = tf.argmax(abstract_pred_probs, axis=1)
    predicted_classes = [class_names[i] for i in abstract_preds]

    # join instead of repeated += (quadratic string building).
    return "".join(
        f"{label}: {line}\n"
        for label, line in zip(predicted_classes, abstract_sentences)
    )
|
|
|
# Wire up and serve the Gradio UI: free-text abstract in, labelled summary out.
abstract_box = gr.Textbox(lines=2, placeholder="Enter Abstract Here...")
demo = gr.Interface(
    fn=make_predictions,
    inputs=abstract_box,
    outputs="text",
)
demo.launch(debug=True, inline=True)