sasha (HF staff) committed on
Commit: 20fe0c2
1 Parent(s): 1883204

initial Space version

Files changed (2)
  1. README.md +1 -1
  2. app.py +149 -0
README.md CHANGED
@@ -2,7 +2,7 @@
 title: HypeCheck
 emoji: 🐠
 colorFrom: red
-colorTo: red
+colorTo: blue
 sdk: streamlit
 sdk_version: 1.10.0
 app_file: app.py
app.py ADDED
@@ -0,0 +1,149 @@
+import spacy_streamlit
+from spacy.symbols import *
+import streamlit as st
+import html
+
+from htbuilder import H, HtmlElement, styles
+from htbuilder.units import unit
+from nltk import bigrams
+
+# Only works in 3.7+: from htbuilder import div, span
+div = H.div
+span = H.span
+
+# Only works in 3.7+: from htbuilder.units import px, rem, em
+px = unit.px
+rem = unit.rem
+em = unit.em
+
+# Colors from the Streamlit palette.
+# These are red-70, orange-70, ..., violet-70, gray-70.
+PALETTE = [
+    "#ff4b4b",
+    "#ffa421",
+    "#ffe312",
+    "#21c354",
+    "#00d4b1",
+    "#00c0f2",
+    "#1c83e1",
+    "#803df5",
+    "#808495",
+]
+
+OPACITIES = [
+    "33", "66",
+]
+
+DEFAULT_TEXT = """AI has reached superhuman levels in various areas such as playing complex strategic and video games, calculating protein folding, and visual recognition. Are we close to superhuman levels in conversational AI as well?"""
+
+spacy_model = "en_core_web_sm"
+
+replacement_dict = {
+    "superhuman levels": "high accuracy",
+    "conversational AI": "language generation"
+}
+
+
+def annotation(body, label="", background=None, color=None, **style):
+    """
+    from https://github.com/tvst/st-annotated-text/blob/master/annotated_text/util.py
+    """
+
+    color_style = {}
+
+    if color:
+        color_style['color'] = color
+
+    if not background:
+        label_sum = sum(ord(c) for c in label)
+        background_color = PALETTE[label_sum % len(PALETTE)]
+        background_opacity = OPACITIES[label_sum % len(OPACITIES)]
+        background = background_color + background_opacity
+
+    return (
+        span(
+            style=styles(
+                background=background,
+                border_radius=rem(0.33),
+                padding=(rem(0.125), rem(0.5)),
+                overflow="hidden",
+                **color_style,
+                **style,
+            ))(
+
+            html.escape(body),
+
+            span(
+                style=styles(
+                    padding_left=rem(0.5),
+                    text_transform="uppercase",
+                ))(
+                span(
+                    style=styles(
+                        font_size=em(0.67),
+                        opacity=0.5,
+                    ))(
+                    html.escape(label),
+                ),
+            ),
+        )
+    )
+
+
+def get_annotated_html(*args):
+
+    out = div()
+
+    for arg in args:
+        if isinstance(arg, str):
+            out(html.escape(arg))
+
+        elif isinstance(arg, HtmlElement):
+            out(arg)
+
+        elif isinstance(arg, tuple):
+            out(annotation(*arg))
+
+        elif isinstance(arg, list):
+            for el in arg:
+                if isinstance(el, str):
+                    out(html.escape(el))
+
+                elif isinstance(el, HtmlElement):
+                    out(el)
+
+                elif isinstance(el, tuple):
+                    out(annotation(*el))
+        else:
+            raise Exception("Oh noes!")
+
+    return str(out)
+
+st.title("AI Hype Checker")
+text = st.text_area("Paste your over-hyped text here:", DEFAULT_TEXT, height=100)
+doc = spacy_streamlit.process_text(spacy_model, text)
+
+for chunk in doc.noun_chunks:
+    if chunk.text in replacement_dict:
+        text = text.replace(chunk.text, replacement_dict[chunk.text])
+    else:
+        continue
+
+text = st.text_area("Fixed it! See below:", text, height=100)
+
+st.markdown("## Here are the terms that we flagged:")
+chunks = [chunk.text for chunk in doc.noun_chunks]
+flagged_chunks = []
+for i in range(len(chunks)):
+    if chunks[i] in replacement_dict:
+        flagged_chunks.append((chunks[i], replacement_dict[chunks[i]]))
+
+flagged_chunks = list(set(flagged_chunks))
+for f in flagged_chunks:
+    st.markdown(
+        get_annotated_html(f),
+        unsafe_allow_html=True,
+    )
+
+
+# st.text(f"Analyzed using spaCy model {spacy_model}")
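For reference, a minimal sketch of how the annotation helpers above can be exercised on their own, with the functions from app.py in scope; the phrase/label pair is borrowed from replacement_dict, and the snippet itself is illustrative rather than part of the commit:

    # Illustrative check of get_annotated_html: a (body, label) tuple is routed
    # through annotation(), while plain strings are HTML-escaped and appended as-is.
    flagged = ("superhuman levels", "high accuracy")
    print(get_annotated_html("Flagged: ", flagged))
    # Prints an HTML <div> containing the escaped text and one highlighted <span>.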