maxspad committed on
Commit
9be5a22
β€’
1 Parent(s): 5a43da6

initial commit

Browse files
Files changed (3) hide show
  1. app.py +139 -0
  2. requirements.txt +7 -0
  3. test.pkl +3 -0
app.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import transformers as tf
3
+ import plotly.graph_objects as go
4
+ import matplotlib.cm as cm
5
+ import pandas as pd
6
+
7
+
8
# Model loaders, cached as singletons so each pipeline downloads only once
@st.experimental_singleton(show_spinner=False)
def load_model(username, prefix, model_name):
    """Fetch (or reuse from the singleton cache) a Hub text-classification pipeline.

    The Hub repo id is assembled as '{username}/{prefix}-{model_name}'.
    """
    return tf.pipeline('text-classification', f'{username}/{prefix}-{model_name}')
13
+
14
@st.experimental_singleton(show_spinner=False)
def load_pickle(f):
    """Read a pickled pandas object from path `f`, cached across Streamlit reruns."""
    return pd.read_pickle(f)
17
+
18
def get_results(model, c):
    """Classify comment `c` with one pipeline and unpack the prediction.

    Returns a dict {'label': float, 'score': float}. Pipeline labels arrive
    as strings like 'LABEL_3'; only the numeric suffix is kept.
    """
    prediction = model(c)[0]
    numeric_label = float(prediction['label'].split('_')[1])
    return {'label': numeric_label, 'score': prediction['score']}
23
+
24
def run_models(model_names, models, c):
    """Score comment `c` with every named model.

    Returns {model_name: result-dict-from-get_results} for each name in
    `model_names`, looked up in the `models` mapping.
    """
    return {name: get_results(models[name], c) for name in model_names}
29
+
30
+
31
### Page header
st.title('Assess the *QuAL*ity of your feedback')
st.caption(
    """Medical education *requires* high-quality feedback, but evaluating feedback
is difficult and time-consuming. This tool uses NLP/ML to predict a validated
feedback quality metric known as the QuAL Score. *Try it for yourself!*
""")

### Load models
# Specify which models to load
USERNAME = 'maxspad'
PREFIX = 'nlp-qual'
models_to_load = ['qual', 'q1', 'q2i', 'q3i']
n_models = float(len(models_to_load))
models = {}
# Show a progress bar while models are downloading,
# then hide it when done
lc_placeholder = st.empty()
loader_container = lc_placeholder.container()
loader_container.caption('Loading models... please wait...')
pbar = loader_container.progress(0.0)
for i, mn in enumerate(models_to_load):
    pbar.progress((i + 1.0) / n_models)
    models[mn] = load_model(USERNAME, PREFIX, mn)
lc_placeholder.empty()

### Load example data
examples = load_pickle('test.pkl')

### Process input
ex = examples['comment'].sample(1).tolist()[0]
try:
    ex = ex.strip().replace('_x000D_', '').replace('nan', 'blank')
except AttributeError:
    # FIX: was a bare `except:`. Non-string cells (e.g. NaN floats) have no
    # .strip(); only that failure should fall back to the placeholder.
    ex = 'blank'
if 'comment' not in st.session_state:
    st.session_state['comment'] = ex

with st.form('comment_form'):
    comment = st.text_area('Try a comment:', value=st.session_state['comment'])
    left_col, right_col = st.columns([1, 9], gap='medium')
    submitted = left_col.form_submit_button('Submit')
    trying_example = right_col.form_submit_button('Try an example!')

# Persist the chosen comment, then rerun so the text area reflects it.
if submitted:
    st.session_state['button_clicked'] = 'submit'
    st.session_state['comment'] = comment
    st.experimental_rerun()
elif trying_example:
    st.session_state['button_clicked'] = 'example'
    st.session_state['comment'] = ex
    st.experimental_rerun()

results = run_models(models_to_load, models, st.session_state['comment'])

tab_titles = ['Overview', 'Q1 - Level of Detail', 'Q2 - Suggestion Given', 'Q3 - Suggestion Linked', 'About']
tabs = st.tabs(tab_titles)

with tabs[0]:
    # Map the overall QuAL score (0-5, scaled by /6) onto a red-yellow-green colormap.
    # NOTE(review): cm.get_cmap is deprecated in newer matplotlib; pin matplotlib<3.9
    # or migrate to matplotlib.colormaps[...] when upgrading.
    cmap = cm.get_cmap('RdYlGn')
    color = cmap(results['qual']['label'] / 6.0)
    # FIX: RGB channels max out at 255 (the original multiplied by 256, which can
    # produce 256), and the CSS rgba() alpha channel is a 0-1 float, not an int.
    color = f'rgba({int(color[0]*255)}, {int(color[1]*255)}, {int(color[2]*255)}, {color[3]})'

    # Gauge showing the predicted overall QuAL score.
    fig = go.Figure(go.Indicator(
        domain={'x': [0, 1], 'y': [0, 1]},
        value=results['qual']['label'],
        mode="gauge+number",
        title={'text': "QuAL"},
        gauge={'axis': {'range': [None, 5]},
               'bgcolor': 'lightgray',
               'bar': {'color': color, 'thickness': 1.0}}
    ), layout=go.Layout(margin=dict(t=0, b=135)))

    cols = st.columns([7, 3])
    with cols[0]:
        st.plotly_chart(fig, use_container_width=True)
    with cols[1]:
        # Sub-score metrics rendered alongside the gauge.
        q1lab = results['q1']['label']
        if q1lab == 0:
            md_str = 'πŸ˜₯ None'
        elif q1lab == 1:
            md_str = '😐 Low'
        elif q1lab == 2:
            md_str = '😊 Medium'
        elif q1lab == 3:
            md_str = '😁 High'
        else:
            # FIX: an unexpected label previously left md_str undefined (NameError).
            # The q1 model is expected to emit only 0-3 — TODO confirm.
            md_str = 'Unknown'
        cols[1].metric('Level of Detail', md_str,
                       help='How specific was the evaluator in describing the behavior?')

        # q2i/q3i are inverted indicators: label 0 means "yes".
        q2lab = results['q2i']['label']
        if q2lab == 0:
            md_str = 'βœ… Yes'
        else:
            md_str = '❌ No'
        cols[1].metric('Suggestion Given', md_str,
                       help='Did the evaluator give a suggestion for improvement?')

        q3lab = results['q3i']['label']
        if q3lab == 0:
            md_str = 'βœ… Yes'
        else:
            md_str = '❌ No'
        cols[1].metric('Suggestion Linked', md_str,
                       help='Is the suggestion for improvement linked to the described behavior?')
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ torchaudio
4
+ transformers
5
+ plotly==5.11.0
6
+ pandas
7
+ spacy
test.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fffa01055fbedf253065c9a12f02c0aa3599856b59c22ebaa09ca23404a9742b
3
+ size 10474780