danielolusipe committed on
Commit
b393fb1
1 Parent(s): 253f9ce

Create new file

Files changed (1)
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import gradio as gr
+ import tensorflow as tf
+ from spacy.lang.en import English
+
+
+ def split_char(text):
+     # Separate every character in the text with a space (character-level input)
+     return " ".join(list(text))
+
+
+ def make_predictions(Your_Abstract):
+     class_names = ["BACKGROUND", "CONCLUSIONS", "METHODS", "OBJECTIVE", "RESULTS"]
+
+     # Set up English sentence parser
+     nlp = English()
+
+     # Create & add sentence-splitting pipeline object to the sentence parser
+     nlp.add_pipe("sentencizer")
+
+     # Create "doc" of parsed sentences
+     doc = nlp(Your_Abstract)
+
+     # Collect the abstract's sentences into a list
+     sents_list = []
+     for sent in doc.sents:
+         sents_list.append(sent.text)
+
+     abstract_sentences = sents_list
+
+     # Loop through each line in the abstract and create a list of dictionaries
+     # containing the features for each line
+     sample_line = []
+     for i, line in enumerate(abstract_sentences):
+         sample_dict = {}
+         sample_dict["text"] = str(line)
+         sample_dict["line_number"] = i
+         sample_dict["total_lines"] = len(abstract_sentences) - 1
+         sample_line.append(sample_dict)
+
+     # Get all the line_number values from the sample abstract
+     abstract_line_number = [line["line_number"] for line in sample_line]
+     # One-hot encode line_number values
+     abstract_line_number_one_hot = tf.one_hot(abstract_line_number, depth=15)
+
+     # Get all the total_lines values from the sample abstract
+     abstract_total_lines = [line["total_lines"] for line in sample_line]
+     # One-hot encode total_lines values
+     abstract_total_lines_one_hot = tf.one_hot(abstract_total_lines, depth=20)
+
+     # Split abstract lines into characters
+     abstract_char = [split_char(sentence) for sentence in abstract_sentences]
+
+     # Load in the trained model (hard-coded Google Drive path from Colab)
+     skimlit_universal_sentence_encoder_model = tf.keras.models.load_model(
+         "/content/drive/MyDrive/skimlit_models/Universal_sentence_encoder_Tribrid_embedding_model"
+     )
+
+     # Make predictions with the loaded model on the sample abstract
+     abstract_pred_probs = skimlit_universal_sentence_encoder_model.predict(
+         x=(abstract_line_number_one_hot,
+            abstract_total_lines_one_hot,
+            tf.constant(abstract_sentences),
+            tf.constant(abstract_char)))
+
+     # Turn the model's prediction probabilities into label indices
+     abstract_preds = tf.argmax(abstract_pred_probs, axis=1)
+
+     # Turn predicted label indices into string class names
+     predicted_classes = [class_names[i] for i in abstract_preds]
+
+     # Pair each abstract line with its predicted label
+     summary = ""
+     for i, line in enumerate(abstract_sentences):
+         summary = summary + f"{predicted_classes[i]}: {line}\n"
+     return summary
+
+
+ demo = gr.Interface(fn=make_predictions,
+                     inputs=gr.Textbox(lines=2, placeholder="Abstract Here..."),
+                     outputs="text")
+ demo.launch(debug=True, inline=True)