bryanmildort committed on
Commit
17ca582
1 Parent(s): 360853f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -6
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import streamlit as st
2
  import pandas as pd
 
3
  import re
4
 
5
  def summarize_function(notes):
@@ -10,7 +11,7 @@ def summarize_function(notes):
10
  st.write('Summary: ')
11
  return gen_text
12
 
13
- st.markdown("<h1 style='text-align: center; color: #489DDB;'>GPT Clinical Notes Summarizer 0.1v</h1>", unsafe_allow_html=True)
14
  st.markdown("<h6 style='text-align: center; color: #489DDB;'>by Bryan Mildort</h1>", unsafe_allow_html=True)
15
 
16
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
@@ -21,17 +22,15 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
21
  # device_map = infer_auto_device_map(model, dtype="float16")
22
  # st.write(device_map)
23
 
24
- @st.cache(allow_output_mutation=True)
25
  def load_model():
26
  model = AutoModelForCausalLM.from_pretrained("bryanmildort/gpt_neo_notes", low_cpu_mem_usage=True)
27
  # model = model.to(device)
28
  tokenizer = AutoTokenizer.from_pretrained("bryanmildort/gpt_neo_notes")
29
- return pipeline("text-generation", model=model, tokenizer=tokenizer)
30
-
31
-
32
  pipe = load_model()
33
 
34
-
35
  notes_df = pd.read_csv('notes_small.csv')
36
  examples_tuple = ()
37
  for i in range(len(notes_df)):
@@ -47,3 +46,10 @@ if st.button('Summarize'):
47
  parsed_input = re.sub(r'\n+', '\n',parsed_input)
48
  final_input = f"""[Notes]:\n{parsed_input}\n[Summary]:\n"""
49
  st.write(summarize_function(final_input))
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ from PIL import Image
4
  import re
5
 
6
  def summarize_function(notes):
 
11
  st.write('Summary: ')
12
  return gen_text
13
 
14
+ st.markdown("<h1 style='text-align: center; color: #489DDB;'>GPT Clinical Notes Summarizer</h1>", unsafe_allow_html=True)
15
  st.markdown("<h6 style='text-align: center; color: #489DDB;'>by Bryan Mildort</h1>", unsafe_allow_html=True)
16
 
17
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
22
  # device_map = infer_auto_device_map(model, dtype="float16")
23
  # st.write(device_map)
24
 
25
+ # @st.cache(allow_output_mutation=True)
26
  def load_model():
27
  model = AutoModelForCausalLM.from_pretrained("bryanmildort/gpt_neo_notes", low_cpu_mem_usage=True)
28
  # model = model.to(device)
29
  tokenizer = AutoTokenizer.from_pretrained("bryanmildort/gpt_neo_notes")
30
+ return pipeline("text-generation", model=model, tokenizer=tokenizer)
31
+
 
32
  pipe = load_model()
33
 
 
34
  notes_df = pd.read_csv('notes_small.csv')
35
  examples_tuple = ()
36
  for i in range(len(notes_df)):
 
46
  parsed_input = re.sub(r'\n+', '\n',parsed_input)
47
  final_input = f"""[Notes]:\n{parsed_input}\n[Summary]:\n"""
48
  st.write(summarize_function(final_input))
49
+
50
+ st.sidebar.markdown("<h1 style='text-align: center; color: #489DDB;'>GPT Clinical Notes Summarizer 0.1v</h1>", unsafe_allow_html=True)
51
+ st.sidebar.markdown("<h6 style='text-align: center; color: #489DDB;'>The model for this application was created with generous support of the Google TPU Research Cloud (TPU). This demo is for investigative research purposes only. The model is assumed to have several limitations and biases, so please oversee responses with human moderation. It is not intended for production ready enterprises and is displayed to illustrate the capabilities of Large Language Models for healthcare research.</h1>", unsafe_allow_html=True)
52
+ tower = Image.open('howard_social.png')
53
+ seal = Image.open('Howard_University_seal.svg.png')
54
+ st.sidebar.image(tower)
55
+ st.sidebar.image(seal)