Nathan Schneider committed
Commit · 7473b3d
Parent(s): 2a5f71e

line endings

Files changed:
- app.py +376 -376
- requirements.txt +6 -6
app.py
CHANGED
@@ -1,376 +1,376 @@
(All 376 lines changed only in their line endings; the file content is otherwise identical before and after.)

import html
import gradio as gr
import spaces
from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification, TokenClassificationPipeline
import torch
import numpy as np

# Description text for the Gradio interface
DESCR_TOP = """
<h1 style="text-align: center">SNACS Tagging</h1>
"""

DESCR_PART_3 = """
<details><summary>Linguistic notes</summary>
<ul>
<li>Some of the tagged items are single words (like <b><i>to</i></b>); others are multiword expressions (like <b><i>according to</i></b>).</li>
<li>Possessive markers and possessive pronouns are tagged.</li>
<li>The English infinitive marker <b><i>to</i></b> is tagged if it marks a purpose.</li>
<li>Phrasal verb particles (like <b><i>up</i></b> in <b><i>give up</i></b>) are not tagged if the meaning is idiomatic.
However, words like <b><i>up</i></b>, <b><i>away</i></b>, and <b><i>together</i></b> are tagged if the meaning is spatial
(“The bird flew <b><i>away</i></b>”).</li>
</ul>
</details>

<p>Try the examples below, or enter your own text in the box and click the Tag! button.
</p>
"""

DESCR_PARA_1 = """<p>🌐 Enter text <b>in any language</b> to analyze the in-context meanings of adpositions/possessives/case markers.
An <b>adposition</b> is a <i>pre</i>position (that precedes a noun, as in English) or a <i>post</i>position (that follows a noun, as in Japanese).
The tagger adds semantic labels from the SNACS tagset to indicate spatial, temporal, and other kinds of relationships.
See the <a href="https://www.xposition.org/">Xposition site</a> and <a href="https://arxiv.org/abs/1704.02134">PDF manual</a> for details.</p>
"""

DESCR_PARA_2 = """<p>🤖 The tagger is a machine learning <a href="https://github.com/WesScivetti/snacs/tree/main">system</a> (specifically XLM-RoBERTa-large)
that has been fine-tuned on manually tagged data in 5 target languages: English, Mandarin Chinese, Hindi, Gujarati, and Japanese.
The system output is not always correct (even if the model’s confidence estimate is close to 100%),
and will likely be less accurate beyond the target languages.</p>
"""

# short labels shown on the buttons, long text inserted into the textbox
EXAMPLES = [
    ['When Mr. and Mrs. Dursley woke up on the dull, gray Tuesday our story starts, there was nothing about the cloudy sky outside to suggest that strange and mysterious things would soon be happening all over the country. Mr. Dursley hummed as he picked out his most boring tie for work, and Mrs. Dursley gossiped away happily as she wrestled a screaming Dudley into his high chair.'],  # inserts "example 1" text
    ['Humpty Dumpty was sitting, with his legs crossed like a Turk, on the top of a high wall — such a narrow one that Alice quite wondered how he could keep his balance — and, as his eyes were steadily fixed in the opposite direction, and he didn\'t take the least notice of her, she thought he must be a stuffed figure, after all.'],
    ['In West Philadelphia born and raised\nOn the playground is where I spent most of my days\nChillin\' out, maxin\', relaxin\' all cool\nAnd all shootin\' some b-ball outside of the school\nWhen a couple of guys who were up to no good\nStarted makin\' trouble in my neighborhood\nI got in one little fight and my mom got scared\nAnd said "You\'re movin\' with your auntie and uncle in Bel-Air"'],
    ['En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lantejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas, con sus pantuflos de lo mesmo, y los días de entresemana se honraba con su vellorí de lo más fino.']
]
EXAMPLE_LABELS = ['Harry Potter (en)', 'Through the Looking Glass (en)', 'Fresh Prince of Bel-Air (en)', 'Don Quixote (es)']


class MyPipeline(TokenClassificationPipeline):
    """Custom pipeline class with a custom postprocess function, designed to output the probability distribution in addition to top scores.
    Inherits from HF TokenClassificationPipeline."""

    def postprocess(self, all_outputs, aggregation_strategy="none", ignore_labels=None):
        try:
            from transformers.pipelines.token_classification import AggregationStrategy
            if isinstance(aggregation_strategy, AggregationStrategy):
                aggregation_strategy = aggregation_strategy.name.lower()
        except Exception:
            pass
        if isinstance(aggregation_strategy, str):
            aggregation_strategy = aggregation_strategy.lower()

        if ignore_labels is None:
            ignore_labels = ["O"]

        id2label = self.model.config.id2label
        label_ids_sorted = sorted(int(k) for k in id2label.keys()) if isinstance(id2label, dict) else list(range(len(id2label)))
        labels_sorted = [id2label[i] for i in label_ids_sorted]

        def _softmax(logits):
            maxes = np.max(logits, axis=-1, keepdims=True)
            shifted = logits - maxes
            exp = np.exp(shifted)
            return exp / exp.sum(axis=-1, keepdims=True)

        all_pre_entities = []
        all_grouped_entities = []

        word_to_chars_map = all_outputs[0].get("word_to_chars_map")
        sentence = all_outputs[0]["sentence"]

        for model_outputs in all_outputs:
            if self.framework == "pt" and model_outputs["logits"][0].dtype in (torch.bfloat16, torch.float16):
                logits = model_outputs["logits"][0].to(torch.float32).numpy()
            else:
                logits = model_outputs["logits"][0].numpy()

            scores = _softmax(logits)

            input_ids = model_outputs["input_ids"][0]
            offset_mapping = model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
            special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()
            word_ids = model_outputs.get("word_ids")

            if self.framework == "tf":
                input_ids = input_ids.numpy()
                offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None

            pre_entities = self.gather_pre_entities(
                sentence,
                input_ids,
                scores,
                offset_mapping,
                special_tokens_mask,
                aggregation_strategy,  # string is fine
                word_ids=word_ids,
                word_to_chars_map=word_to_chars_map,
            )

            grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
            grouped_entities = [
                e for e in grouped_entities
                if e.get("entity", None) not in ignore_labels
                and e.get("entity_group", None) not in ignore_labels
            ]

            all_pre_entities.extend(pre_entities)
            all_grouped_entities.extend(grouped_entities)

        if len(all_outputs) > 1:
            all_grouped_entities = self.aggregate_overlapping_entities(all_grouped_entities)

        def token_pred_label_id(token_scores: np.ndarray) -> int:
            return int(token_scores.argmax())

        def label_from_entity_dict(ent: dict) -> str | None:
            if "entity_group" in ent and ent["entity_group"] is not None:
                return ent["entity_group"]
            if "entity" in ent and ent["entity"] is not None:
                tag = ent["entity"]
                if tag.startswith(("B-", "I-")):
                    return tag[2:]
                return tag
            return None

        def spans_overlap(a_start, a_end, b_start, b_end):
            if a_start is None or a_end is None or b_start is None or b_end is None:
                return False
            return max(a_start, b_start) < min(a_end, b_end)

        pre_tokens = []
        for pe in all_pre_entities:
            pre_tokens.append({
                "start": pe.get("start"),
                "end": pe.get("end"),
                "index": pe.get("index"),
                "scores": pe.get("scores"),
                "pred_id": token_pred_label_id(pe.get("scores")),
                "pred_label": id2label[token_pred_label_id(pe.get("scores"))]
            })

        def average_probs(token_list):
            if not token_list:
                return None
            arr = np.stack([t["scores"] for t in token_list], axis=0)
            avg = np.nanmean(arr, axis=0)
            s = float(avg.sum())
            if s > 0:
                avg = avg / s
            return avg

        results_with_probs = []
        for ent in all_grouped_entities:
            ent_start = ent.get("start")
            ent_end = ent.get("end")
            ent_tag = label_from_entity_dict(ent)

            if aggregation_strategy == "none":
                idx = ent.get("index")
                candidate_tokens = [t for t in pre_tokens if t["index"] == idx] if idx is not None else []
            else:
                overlapping = [t for t in pre_tokens if spans_overlap(ent_start, ent_end, t["start"], t["end"])]
                def strip_bi(lbl): return lbl[2:] if lbl.startswith(("B-", "I-")) else lbl
                if ent_tag is not None:
                    overlapping = [t for t in overlapping if strip_bi(t["pred_label"]) == ent_tag]
                candidate_tokens = overlapping

            avg = average_probs(candidate_tokens)
            if avg is None:
                probs_vec = np.zeros((len(labels_sorted),), dtype=float)
                ent_label = ent.get("entity") or ent.get("entity_group")
                if ent_label and ent_label.startswith(("B-", "I-")):
                    ent_label = ent_label[2:]
                chosen_i = None
                for i, lab in enumerate(labels_sorted):
                    base = lab[2:] if lab.startswith(("B-", "I-")) else lab
                    if ent_label == base:
                        chosen_i = i
                        break
                probs_vec[chosen_i if chosen_i is not None else 0] = 1.0
            else:
                probs_vec = avg

            ent["probabilities"] = {labels_sorted[i]: float(probs_vec[i]) for i in range(len(labels_sorted))}
            results_with_probs.append(ent)

        return results_with_probs


@spaces.GPU
def classify_tokens(text: str):
    """Main function for SNACS text classification that is called in the Hugging Face Space.
    Input: string to be tagged
    Output: HTML-styled rendering of tagged outputs
        styled_html1: HTML output with entities grouped
        table_html: labels from output one along with confidence scores
        styled_html2: HTML output of labels for raw tokenized output (no grouping of subwords or entities). Top 5 label scores are displayed."""

    PALETTE = [  # "#1f77b4",
        "#ff7f0e",
        "#2ca02c",
        "#d62728",
        "#9467bd",
        # "#8c564b",
        "#e377c2",
        # "#7f7f7f",
        "#cccc00",  # tweaked
        "#17becf",
        "#aec7e8",
        "#ffbb78",
        "#c49c94",
        "#c5b0d5",
        "#98df8a",
        "#ff9896",
        "#f7b6d2",
        # "#c7c7c7",
        "#f7f777",  # tweaked
        "#9edae5"
    ][::-1]  # reverse-sort to put the lighter colors first

    model_name = "WesScivetti/SNACS_Multilingual"

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForTokenClassification.from_pretrained(model_name, torch_dtype=torch.float16 if torch.cuda.is_available() else None)
    # ONE pipeline; override aggregation per-call
    pipe = MyPipeline(
        model=model,
        tokenizer=tokenizer,
        device=0,
        framework="pt"
    )

    results_simple = pipe(text, aggregation_strategy="simple")  # output #1
    results_none = pipe(text, aggregation_strategy="none", ignore_labels=[])  # output #2 (per-token + probabilities)
    print(results_none)

    # sort
    sorted_results1 = sorted(results_simple, key=lambda x: x["start"])
    sorted_results2 = sorted(results_none, key=lambda x: x["start"])

    # color helper that tolerates B-/I- prefixes
    def pick_color(label: str, lbl2color: dict) -> str:
        base = label[2:] if label.startswith(("B-", "I-")) else label
        if base in lbl2color:
            color = lbl2color[base]
        elif base == "O":
            color = "#b0adac"
            lbl2color[base] = color
        else:
            color = PALETTE[len(lbl2color) % len(PALETTE)]
            lbl2color[base] = color
        return color

    def display_label(label: str) -> str:
        """Simplified version of the label to display, removing the "p." prefix and un-duplicating supersenses"""
        lab = label.replace("p.", "")
        lab1, lab2 = lab.split("-")
        if lab1 == lab2:
            lab = lab1
        else:
            lab = lab1 + "~>" + lab2
        return lab

    # ---------- Output 1: SIMPLE (grouped spans) ----------
    output1, last_idx = "", 0
    lbl2color = {}
    for e in sorted_results1:
        s, t = e["start"], e["end"]
        lab = e["entity_group"]  # grouped results use entity_group
        short_lab = display_label(lab)
        score = e["score"]
        word = html.escape(text[s:t])
        output1 += html.escape(text[last_idx:s])
        color = pick_color(lab, lbl2color)
        tooltip = f"{short_lab} ({score:.2f})"
        word_with_label = f"{word}"
        output1 += (
            f"<span style='background-color:{color};padding:2px;border-radius:4px;' "
            f"title='{tooltip}'>{word_with_label}</span>"
        )
        last_idx = t
    output1 += html.escape(text[last_idx:])


    output2, last_idx2 = "", 0
    for e in sorted_results2:
        s, t = e["start"], e["end"]
        lab = e["entity"]  # NONE returns `entity`
        probs = e["probabilities"]
        word = html.escape(text[s:t])
        output2 += html.escape(text[last_idx2:s])
        color = pick_color(lab, lbl2color)

        top5 = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)[:5]
        top5_lines = [f"{html.escape(k)}: {v:.2%}" for k, v in top5]
        tooltip = "Top-5 " + " ".join(top5_lines)

        word_with_label = f"{word}_{html.escape(lab)}"
        output2 += (
            f"<span style='background-color:{color};padding:2px;border-radius:4px;' "
            f"title='{tooltip}'>{word_with_label}</span>"
        )
        last_idx2 = t
    output2 += html.escape(text[last_idx2:])

    # (table can use results_simple)
    table_html = "<table style='border-collapse:collapse;font-family:sans-serif;'>"
    table_html += "<tr><th style='border:1px solid #ccc;padding:6px;'>Token</th>"
    table_html += "<th style='border:1px solid #ccc;padding:6px;'>SNACS Label</th>"
    table_html += "<th style='border:1px solid #ccc;padding:6px;'>Confidence</th></tr>"
    for e in sorted_results1:
        token = html.escape(e["word"])
        lab = e["entity_group"]
        short_lab = display_label(lab)
        score = f"{e['score']:.2f}"
        color = pick_color(lab, lbl2color)
        table_html += (
            "<tr>"
            f"<td style='border:1px solid #ccc;padding:6px;background-color:{color};'>{token}</td>"
            f"<td style='border:1px solid #ccc;padding:6px;background-color:{color};'>{short_lab}</td>"
            f"<td style='border:1px solid #ccc;padding:6px;'>{score}</td>"
            "</tr>"
        )
    table_html += "</table>"

    styled_html1 = f"<div style='font-family:sans-serif;line-height:1.6;'>{output1}</div>"
    styled_html2 = f"<div style='font-family:sans-serif;line-height:1.6;'>{output2}</div>"
    return sorted_results1, styled_html1, table_html, styled_html2
    # except Exception as e:
    #     # Force the real error into the Space logs
    #     import traceback, sys
    #     traceback.print_exc(file=sys.stderr)
    #     # Also show something in the UI so you know it’s the worker, not Gradio
    #     return f"<pre>{html.escape(repr(e))}</pre>", "", ""


with gr.Blocks(title="SNACS Tagging", theme="light") as demo:
    with gr.Row():
        description = gr.HTML(DESCR_TOP)

    with gr.Row():
        with gr.Column():
            para1 = gr.HTML(DESCR_PARA_1)
        with gr.Column():
            para2 = gr.HTML(DESCR_PARA_2)

    with gr.Row():
        description = gr.HTML(DESCR_PART_3)

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(lines=4, placeholder="Enter a sentence...", label="Input Text")
            tag_btn = gr.Button("Tag!", variant="primary")
            examples = gr.Examples(EXAMPLES, input_text, example_labels=EXAMPLE_LABELS)
        with gr.Column() as output:
            with gr.Tab("Simple Output"):
                simple_output = gr.HighlightedText(label="Tagged Text")
            with gr.Tab("Detailed Output"):
                output1 = gr.HTML(label="SNACS Tagged Sentence")
                output2 = gr.HTML(label="SNACS Table with Colored Labels")
                output3 = gr.HTML(label="SNACS Tagged Sentence with No Label Aggregation")

    examples.outputs = [simple_output, output1, output2, output3]
    tag_btn.click(fn=classify_tokens, inputs=input_text, outputs=[simple_output, output1, output2, output3])


demo.launch()
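For reference, here is a minimal usage sketch (not part of this commit) showing how the extra "probabilities" field produced by the custom postprocess could be inspected outside the Gradio UI. It assumes MyPipeline as defined in app.py above is available in the current session and that the WesScivetti/SNACS_Multilingual checkpoint can be downloaded; the example sentence and the CPU device choice are illustrative.

# Hedged sketch, not part of this commit: run the custom pipeline on CPU and
# inspect the "probabilities" field that MyPipeline.postprocess adds to each span.
from transformers import AutoTokenizer, AutoModelForTokenClassification

model_name = "WesScivetti/SNACS_Multilingual"
tok = AutoTokenizer.from_pretrained(model_name)
mdl = AutoModelForTokenClassification.from_pretrained(model_name)

snacs = MyPipeline(model=mdl, tokenizer=tok, framework="pt", device=-1)  # MyPipeline from app.py above

for ent in snacs("She walked into the room before dinner.", aggregation_strategy="simple"):
    # Each grouped span keeps the usual pipeline fields plus a full label distribution.
    top3 = sorted(ent["probabilities"].items(), key=lambda kv: kv[1], reverse=True)[:3]
    print(ent["word"], ent["entity_group"], [f"{lab}: {p:.2f}" for lab, p in top3])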
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
(Lines 1-6 changed only in their line endings; accelerate>=0.28 is unchanged.)

transformers>=4.39
torch>=2.1
gradio>=4.30
spaces>=0.10
huggingface_hub>=0.22
numpy>=1.24
accelerate>=0.28