File size: 2,934 Bytes
b393fb1
 
 
 
 
 
 
801ac9f
b393fb1
 
 
 
 
 
 
 
35c1934
b393fb1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
801ac9f
b393fb1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
import gradio as gr
import tensorflow as tf

def split_char(text):
  return " ".join(list(text))
  
from spacy.lang.en import English
def make_predictions(Input):
  class_names=['BACKGROUND','CONCLUSIONS','METHODS','OBJECTIVE','RESULTS']
  # setup English sentence parser
  nlp = English()

  # create & add sentence splitting pipeline object to sentence parser
  nlp.add_pipe('sentencizer')

  # create "doc" of parsed sequences
  doc=nlp(Input)

  # Create a list 
  sents_list = [] 
  for sent in doc.sents:
    sents_list.append(sent.text)

  abstract_sentences=sents_list

  # Creating a loop to go through each line in abstract and create a list of dictionaries containing festure for each lines
  sample_line=[]
  for i,line in enumerate(abstract_sentences):
    sample_dict={}
    sample_dict["text"]=str(line)
    sample_dict["line_number"]=i
    sample_dict["total_lines"]=len(abstract_sentences)-1
    sample_line.append(sample_dict)
  
  # Get all the line_number values from sample abstract
  abstract_line_number=[line["line_number"] for line in sample_line]
  # one-hot encoding line_number values
  abstract_line_number_one_hot=tf.one_hot(abstract_line_number,depth=15)

  # Getting all total_number values from sample abstract
  abstract_total_lines=[line["total_lines"] for line in sample_line]
  # One-hot encoding total_number lines
  abstract_total_lines_one_hot=tf.one_hot(abstract_total_lines,depth=20)

  # Splitting abstract lines into character
  abstract_char=[split_char(sentence) for sentence in abstract_sentences]
  #abstract_char=[" ".join(list((sentence) for sentence in abstract_sentences))]
  
  # Loading in model and getting a summary of loaded model
  skimlit_universal_sentence_encoder_model=tf.keras.models.load_model("/content/drive/MyDrive/skimlit_models/Universal_sentence_encoder_Tribrid_embedding_model")

  # Making prediction with loaded model on sample abstract
  abstract_pred_probs=skimlit_universal_sentence_encoder_model.predict(x=(abstract_line_number_one_hot,
                                                                          abstract_total_lines_one_hot,
                                                                          tf.constant(abstract_sentences),
                                                                          tf.constant(abstract_char)))
  
  # Turning model's prediction into labels
  abstract_preds=tf.argmax(abstract_pred_probs,axis=1)

  # Turn predicted labels into string class names
  predicted_classes=[class_names[i] for i in abstract_preds]

  # Visualizing abstract lines and predicted labels
  summary=""
  for i,line in enumerate(abstract_sentences):
    summary=summary+f"{predicted_classes[i]}: {line}\n"
    #summary=f"{predicted_classes[i]}: {line}"
  return summary
  
demo = gr.Interface(fn=make_predictions,inputs=gr.Textbox(lines=2, placeholder="Enter Abstract Here..."),outputs="text")
demo.launch(debug=True, inline=True)