import gradio as gr
from typing import List
from qasem.end_to_end_pipeline import QASemEndToEndPipeline
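# Load the end-to-end QASem pipeline (QASRL, QANom and QADiscourse models).
# Assumes the spaCy model `en_core_web_lg` is available locally,
# e.g. installed with `python -m spacy download en_core_web_lg`.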
pipeline = QASemEndToEndPipeline(spacy_model="en_core_web_lg")
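# For a single input sentence, the pipeline output (as consumed by `call` below) is roughly:
#   {"qasrl":       [{"predicate_idx": int, "QAs": [{"question": str, "answers": [str, ...]}, ...]}, ...],
#    "qanom":       [... same structure as "qasrl" ...],
#    "qadiscourse": [{"question": str, "answer": str}, ...]}
# and is wrapped in a {"qasem": ..., "openie": ...} dict when `output_openie=True`.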
description = """This is a demo of the QASem Parsing pipeline. It wraps models of three QA-based semantic tasks, composing a comprehensive semi-structured representation of sentence meaning: verbal semantic role labeling (QASRL), nominal semantic role labeling (QANom), and discourse relations (QADiscourse)."""
title = "QASem Parsing Demo"
all_layers = ["qasrl", "qanom", "qadiscourse"]
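# Each example row matches the `inputs` order of the Interface below:
# [sentence, enabled layers, show OpenIE, nominalization detection threshold].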
examples = [["Both were shot in the confrontation with police and have been recovering in hospital since the attack .", all_layers, False, 0.75],
["the construction of the officer 's building was delayed by the lockdown and is expected to continue for at least 10 more months.", all_layers, False, 0.75],
["While President Obama expressed condolences regarding the death of Margaret Thatcher upon her death earlier this year , he did not issue an executive order that flags be lowered in her honor .", all_layers, False, 0.75],
["We made a very clear commitment : if there is any proposal in the next parliament for a transfer of powers to Brussels ( the EU ) we will have an in/out referendum .", all_layers, False, 0.75],
["The doctor asked about the progress in Luke 's treatment .", all_layers, False, 0.75],
["The Veterinary student was interested in Luke 's treatment of sea animals .", all_layers, False, 0.7],
["Some reviewers agreed that the criticism raised by the AC is mostly justified .", all_layers, False, 0.6]]
input_sent_box_label = "Insert sentence here, or select from the examples below"
links = """<p style='text-align: center'>
<a href='https://github.com/kleinay/QASem' target='_blank'>Github Repo</a> | <a href='https://arxiv.org/abs/2205.11413' target='_blank'>Paper</a>
</p>"""
def call(sentence, layers, show_openie: bool, detection_threshold: float):
    outputs = pipeline([sentence], nominalization_detection_threshold=detection_threshold, output_openie=show_openie)
    if show_openie:
        openie_outputs = outputs["openie"][0]  # list of OpenIE tuples
        outputs = outputs["qasem"]
    outputs = outputs[0]  # only one sentence in input batch
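    # Helpers: render each QA as a markdown-style bullet line "- <question> --- <answer(s)>".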
    def pretty_qadisc_qas(qa_infos) -> List[str]:
        if not qa_infos: return []
        return ["- " + f"{qa['question']} --- {qa['answer']}".lstrip()
                for qa in qa_infos if qa is not None]

    def pretty_qasrl_qas(pred_info) -> List[str]:
        if not pred_info or not pred_info['QAs']: return []
        return ["- " + f"{qa['question']} --- {';'.join(qa['answers'])}".lstrip()
                for qa in pred_info['QAs'] if qa is not None]
    # filter outputs by requested `layers`
    outputs = {layer: qas if layer in layers else []
               for layer, qas in outputs.items()}

    # Prettify outputs
    qasrl_qas = [qa for pred_info in outputs['qasrl'] for qa in pretty_qasrl_qas(pred_info)]
    qanom_qas = [qa for pred_info in outputs['qanom'] for qa in pretty_qasrl_qas(pred_info)]
    qadisc_qas = pretty_qadisc_qas(outputs['qadiscourse'])
    all_qas = []
    if "qasrl" in layers: all_qas += ['\nQASRL:'] + qasrl_qas
    if "qanom" in layers: all_qas += ['\nQANom:'] + qanom_qas
    if "qadiscourse" in layers: all_qas += ['\nQADiscourse:'] + qadisc_qas
    if not qasrl_qas + qanom_qas + qadisc_qas:
        pretty_qa_output = "NO QA GENERATED"
    else:
        pretty_qa_output = "\n".join(all_qas)
    # also present highlighted predicates
    qasrl_predicates = [pred_info['predicate_idx'] for pred_info in outputs['qasrl']]
    qanom_predicates = [pred_info['predicate_idx'] for pred_info in outputs['qanom']]
    def color(idx):
        if idx in qasrl_predicates: return "aquamarine"
        if idx in qanom_predicates: return "aqua"
        return "transparent"  # leave non-predicate tokens unhighlighted
    def word_span(word, idx):
        return f'<span style="background-color: {color(idx)}">{word}</span>'
    html = '<span>' + ' '.join(word_span(word, idx) for idx, word in enumerate(sentence.split(" "))) + '</span>'
    # show openie_outputs
    if show_openie:
        repr_oie = lambda tup: f"({','.join(e for e in tup)})"
        openie_html = '<span><b>Open Information Extraction:</b><br>' + '<br>'.join([repr_oie(tup) for tup in openie_outputs]) + '</span>'
    else:
        openie_html = ''
    return html, pretty_qa_output, openie_html, outputs
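# Gradio UI: a sentence textbox, layer checkboxes, an OpenIE toggle and a threshold slider,
# mapped onto the four return values of `call`.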
iface = gr.Interface(fn=call,
                     inputs=[gr.components.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4),
                             gr.components.CheckboxGroup(all_layers, value=all_layers, label="Annotation Layers"),
                             gr.components.Checkbox(value=False, label="Show OpenIE format (converted from verbal QASRL only)"),
                             gr.components.Slider(minimum=0., maximum=1., step=0.01, value=0.75, label="Nominalization Detection Threshold")],
                     outputs=[gr.components.HTML(label="Detected Predicates"),
                              gr.components.Textbox(label="Generated QAs"),
                              gr.components.HTML(label="OpenIE Output"),
                              gr.components.JSON(label="Raw QASemEndToEndPipeline Output")],
                     title=title,
                     description=description,
                     article=links,
                     examples=examples)
iface.launch()