indkhan committed
Commit
b9f5a84
1 Parent(s): 7fda172

Upload 7 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+models/nmodelmain/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,86 @@
+from spacy.lang.en import English
+import tensorflow as tf
+import streamlit as st
+
+st.title("Research Abstract Classifier")
+st.markdown("""
+This app classifies study abstracts into different sections such as BACKGROUND, OBJECTIVE, METHODS, RESULTS, and CONCLUSIONS.
+Enter your study abstract in the text area below.
+""")
+
+abstext = st.text_area("Enter the study")
+st.link_button("Research Paper: National Library of Medicine", "https://pubmed.ncbi.nlm.nih.gov/")
+
+def split_chars(text):
+    # Space-separate every character so the model's char-level vectorizer can split on whitespace.
+    return " ".join(list(text))
+
+loaded_model = tf.keras.models.load_model(
+    r'models/nmodelmain')
+
+if abstext:
+    nlp = English()
+    with st.spinner("Vectorizing..."):
+        nlp.add_pipe("sentencizer")  # rule-based sentence splitter
+        doc = nlp(abstext)
+        abstract_lines = [str(sent) for sent in list(doc.sents)]
+
+        total_lines_in_sample = len(abstract_lines)
+
+        # One feature dict per sentence: text, position, and index of the abstract's last line.
+        sample_lines = []
+        for i, line in enumerate(abstract_lines):
+            sample_dict = {}
+            sample_dict["text"] = str(line)
+            sample_dict["line_number"] = i
+            sample_dict["total_lines"] = total_lines_in_sample - 1
+            sample_lines.append(sample_dict)
+
+        # One-hot encode each sentence's position (the model was trained with depth 15).
+        test_abstract_line_numbers = [line["line_number"] for line in sample_lines]
+        test_abstract_line_numbers_one_hot = tf.one_hot(
+            test_abstract_line_numbers, depth=15)
+
+        # One-hot encode the abstract's length (trained with depth 20).
+        test_abstract_total_lines = [line["total_lines"] for line in sample_lines]
+        test_abstract_total_lines_one_hot = tf.one_hot(
+            test_abstract_total_lines, depth=20)
+
+        abstract_chars = [split_chars(sentence) for sentence in abstract_lines]
+
+    with st.spinner("Predicting..."):
+        # Four inputs: line position, abstract length, sentence text, character text.
+        test_abstract_pred_probs = loaded_model.predict(x=(test_abstract_line_numbers_one_hot,
+                                                           test_abstract_total_lines_one_hot,
+                                                           tf.constant(abstract_lines),
+                                                           tf.constant(abstract_chars)))
+
+        test_abstract_preds = tf.argmax(test_abstract_pred_probs, axis=1)
+
+    with st.spinner("Categorizing..."):
+        numlab = [int(i) for i in test_abstract_preds]
+        # Class order must match the label encoding used at training time.
+        classnames = ['BACKGROUND', 'CONCLUSIONS', 'METHODS', 'OBJECTIVE', 'RESULTS']
+
+        # Group sentences under their predicted section label.
+        dictionary = {}
+        for i in range(len(numlab)):
+            class_name = classnames[numlab[i]]
+            abstract_line = abstract_lines[i]
+            if class_name in dictionary:
+                dictionary[class_name].append(abstract_line)
+            else:
+                dictionary[class_name] = [abstract_line]
+
+        # Re-order the sections into conventional reading order before display.
+        ordered_dictionary = {}
+        ordered_classnames = ['BACKGROUND', 'OBJECTIVE',
+                              'METHODS', 'RESULTS', 'CONCLUSIONS']
+        for class_name in ordered_classnames:
+            if class_name in dictionary:
+                ordered_dictionary[class_name] = dictionary[class_name]
+
+        for class_name, lines in ordered_dictionary.items():
+            st.subheader(class_name)
+            for line in lines:
+                st.write(line)
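
The app follows a tribrid-input pattern: each sentence is fed to the model together with its one-hot-encoded position, the one-hot-encoded abstract length, and a character-level view of the text. A minimal standalone sketch of how those tensors line up for a three-sentence abstract (the sample sentences are illustrative; the depths 15 and 20 are taken from app.py above):

```python
import tensorflow as tf

# Hypothetical three-sentence abstract, for illustration only.
abstract_lines = [
    "Statins are widely prescribed.",
    "We measured their effect on LDL cholesterol.",
    "LDL fell by roughly 30% on average.",
]

n = len(abstract_lines)
line_numbers_one_hot = tf.one_hot(list(range(n)), depth=15)  # each sentence's position
total_lines_one_hot = tf.one_hot([n - 1] * n, depth=20)      # abstract length, repeated per sentence
abstract_chars = [" ".join(s) for s in abstract_lines]       # space-separated characters

print(line_numbers_one_hot.shape)  # (3, 15)
print(total_lines_one_hot.shape)   # (3, 20)
```

Stacking one row per sentence keeps all four inputs aligned on the batch dimension, which is why the positional tensors repeat the abstract-level length once per sentence.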
models/nmodelmain/fingerprint.pb ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3981309d6e8fc78fbe750dc0e2e1b2b97ec4c268415a7a1f8be06882ca983c3
+size 56
models/nmodelmain/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ace2286dc0634d509e8c8ac16f460e7ad7b1ecbccbaf2b0c3ce99f8ce6b5d10
+size 37866
models/nmodelmain/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a706863cf27c58db7371b15a5b51debf120d782337d16ac366787ee85bd9468f
+size 13707313
models/nmodelmain/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85dfd722a93fec214e451db69f602d81a78c90dd30fd84ed4dc0d5f8f470dc5d
+size 1028864714
models/nmodelmain/variables/variables.index ADDED
Binary file (15.7 kB)
 
requirements.txt ADDED
@@ -0,0 +1,95 @@
+absl-py==2.1.0
+altair==5.2.0
+annotated-types==0.6.0
+astunparse==1.6.3
+attrs==23.2.0
+blinker==1.7.0
+blis==0.7.11
+cachetools==5.3.2
+catalogue==2.0.10
+certifi==2023.11.17
+charset-normalizer==3.3.2
+click==8.1.7
+cloudpathlib==0.16.0
+colorama==0.4.6
+confection==0.1.4
+cymem==2.0.8
+flatbuffers==23.5.26
+gast==0.5.4
+gitdb==4.0.11
+GitPython==3.1.41
+google-auth==2.26.2
+google-auth-oauthlib==1.2.0
+google-pasta==0.2.0
+grpcio==1.60.0
+h5py==3.10.0
+idna==3.6
+importlib-metadata==7.0.1
+Jinja2==3.1.3
+jsonschema==4.21.1
+jsonschema-specifications==2023.12.1
+keras==2.15.0
+langcodes==3.3.0
+libclang==16.0.6
+Markdown==3.5.2
+markdown-it-py==3.0.0
+MarkupSafe==2.1.4
+mdurl==0.1.2
+ml-dtypes==0.2.0
+murmurhash==1.0.10
+numpy==1.26.3
+oauthlib==3.2.2
+opt-einsum==3.3.0
+packaging==23.2
+pandas==2.2.0
+pillow==10.2.0
+preshed==3.0.9
+protobuf==4.23.4
+pyarrow==15.0.0
+pyasn1==0.5.1
+pyasn1-modules==0.3.0
+pydantic==2.5.3
+pydantic_core==2.14.6
+pydeck==0.8.1b0
+Pygments==2.17.2
+python-dateutil==2.8.2
+pytz==2023.3.post1
+referencing==0.32.1
+requests==2.31.0
+requests-oauthlib==1.3.1
+rich==13.7.0
+rpds-py==0.17.1
+rsa==4.9
+six==1.16.0
+smart-open==6.4.0
+smmap==5.0.1
+spacy==3.7.2
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.4.8
+streamlit==1.30.0
+tenacity==8.2.3
+tensorboard==2.15.1
+tensorboard-data-server==0.7.2
+tensorflow==2.15.0
+tensorflow-estimator==2.15.0
+tensorflow-intel==2.15.0
+tensorflow-io-gcs-filesystem==0.31.0
+termcolor==2.4.0
+thinc==8.2.2
+toml==0.10.2
+toolz==0.12.0
+tornado==6.4
+tqdm==4.66.1
+typer==0.9.0
+typing_extensions==4.9.0
+tzdata==2023.4
+tzlocal==5.2
+urllib3==2.1.0
+validators==0.22.0
+wasabi==1.1.2
+watchdog==3.0.0
+weasel==0.3.4
+Werkzeug==3.0.1
+wrapt==1.14.1
+zipp==3.17.0
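
Once the pinned dependencies are installed and the LFS objects pulled (the weights file alone is roughly 1 GB), a quick sanity check that the uploaded SavedModel loads, as a minimal sketch:

```python
import tensorflow as tf

# Assumes the repository is checked out with `git lfs pull` completed.
model = tf.keras.models.load_model("models/nmodelmain")
model.summary()  # should list four input layers: line numbers, total lines, sentences, characters
```

The app itself is then launched with `streamlit run app.py`.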