Spaces:
Paused
Paused
debatelab-admin
committed on
Commit
•
73c1565
1
Parent(s):
3d64989
Update app.
Browse files- README.md +1 -1
- aaac_util.py +588 -0
- app.py +464 -0
- requirements.txt +5 -0
README.md
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
emoji: π
|
4 |
colorFrom: gray
|
5 |
colorTo: gray
|
|
|
1 |
---
|
2 |
+
title: DeepA2 Demo
|
3 |
emoji: π
|
4 |
colorFrom: gray
|
5 |
colorTo: gray
|
aaac_util.py
ADDED
@@ -0,0 +1,588 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
### utility for T5-aaac
|
2 |
+
import re
|
3 |
+
import ast
|
4 |
+
import logging
|
5 |
+
from string import Template
|
6 |
+
import random
|
7 |
+
|
8 |
+
import pyparsing as pp
|
9 |
+
import z3
|
10 |
+
|
11 |
+
# Maps each supported argumentation scheme to an integer — presumably the
# number of premises the scheme consumes (e.g. 'case analysis' and
# 'generalized dilemma' map to 3, most others to 2) — TODO confirm
# against the scheme definitions used by the dataset generator.
PREM_ARG_SCHEMES = {
    'modus ponens': 2,
    'chain rule': 2,
    'adjunction': 2,
    'case analysis': 3,
    'disjunctive syllogism': 2,
    'biconditional elimination': 1,
    'instantiation': 1,
    'hypothetical syllogism': 2,
    'generalized biconditional elimination': 1,
    'generalized adjunction': 2,
    'generalized dilemma': 3,
    'generalized disjunctive syllogism': 2
}


# Module-level logger (logger name kept from the original transformer_tools project).
util_logger = logging.getLogger('transformer_tools.util.t5_util')
|
28 |
+
|
29 |
+
|
30 |
+
|
31 |
+
#######################################
|
32 |
+
# Layouter class #
|
33 |
+
#######################################
|
34 |
+
|
35 |
+
# Defines how to present AAAC raw data to model (as text)
|
36 |
+
class AAACLayouter:
|
37 |
+
|
38 |
+
PRED_CHARS = "FGHIJKLMNOPQRSTUVWABCDE"
|
39 |
+
ENT_CHARS = "abcdeklmnopqrstuwfgh"
|
40 |
+
def substitutions():
|
41 |
+
substitutions = {"F"+str(i+1):AAACLayouter.PRED_CHARS[i]+" " for i in range(20)}
|
42 |
+
substitutions.update({"a"+str(i+1):AAACLayouter.ENT_CHARS[i] for i in range(20)})
|
43 |
+
return substitutions
|
44 |
+
|
45 |
+
|
46 |
+
MASK_STRING = "??"
|
47 |
+
|
48 |
+
# defines how to present reason and conclusion statements to the model
|
49 |
+
def format_statements_list(statements:list, mask_prob:float=0.0) -> str:
|
50 |
+
if len(statements)==0:
|
51 |
+
return "None"
|
52 |
+
def ref_reco(sdict):
|
53 |
+
r = "(%s)" % sdict['ref_reco'] if random.random()>mask_prob else AAACLayouter.MASK_STRING
|
54 |
+
return r
|
55 |
+
list_as_string = ["%s (ref: %s)" % (sdict['text'].lower(),ref_reco(sdict)) for sdict in statements]
|
56 |
+
list_as_string = " | ".join(list_as_string)
|
57 |
+
return list_as_string
|
58 |
+
|
59 |
+
# defines how to present argdown premise and conclusion statements to the model
|
60 |
+
def format_ad_statements_list(statements:list, mask_prob:float=0.0) -> str:
|
61 |
+
if len(statements)==0:
|
62 |
+
return "None"
|
63 |
+
def ref_reco(sdict):
|
64 |
+
r = "(%s)" % sdict['ref_reco'] if random.random()>mask_prob else AAACLayouter.MASK_STRING
|
65 |
+
return r
|
66 |
+
def explicit(sdict):
|
67 |
+
r = str(sdict['explicit']) if random.random()>mask_prob else AAACLayouter.MASK_STRING
|
68 |
+
return r
|
69 |
+
list_as_string = ["%s (ref: %s explicit: %s)" % (sdict['text'].lower(),ref_reco(sdict),explicit(sdict)) for sdict in statements]
|
70 |
+
list_as_string = " | ".join(list_as_string)
|
71 |
+
return list_as_string
|
72 |
+
|
73 |
+
# defines how to present formalizations to the model
|
74 |
+
def format_formalizations_list(formalizations:list, mask_prob:float=0.0) -> str:
|
75 |
+
if len(formalizations)==0:
|
76 |
+
return "None"
|
77 |
+
def ref_reco(sdict):
|
78 |
+
r = "(%s)" % sdict['ref_reco'] if random.random()>mask_prob else AAACLayouter.MASK_STRING
|
79 |
+
return r
|
80 |
+
def fform(sdict):
|
81 |
+
t = Template(sdict['form'])
|
82 |
+
r = t.substitute(AAACLayouter.substitutions())
|
83 |
+
r = r.replace("Β¬","not ")
|
84 |
+
return r
|
85 |
+
list_as_string = ["%s (ref: %s)" % (fform(sdict),ref_reco(sdict)) for sdict in formalizations]
|
86 |
+
list_as_string = " | ".join(list_as_string)
|
87 |
+
return list_as_string
|
88 |
+
|
89 |
+
# defines how to present formalizations to the model
|
90 |
+
def format_plcd_subs(plcd_subs:dict, mask_prob:float=0.0) -> str:
|
91 |
+
if len(plcd_subs.keys())==0:
|
92 |
+
return "None"
|
93 |
+
def mask(s):
|
94 |
+
return s if random.random()>mask_prob else AAACLayouter.MASK_STRING
|
95 |
+
list_as_string = ["%s: %s" % (AAACLayouter.substitutions()[k],mask(v.lower())) for k,v in plcd_subs.items()]
|
96 |
+
list_as_string = " | ".join(list_as_string)
|
97 |
+
return list_as_string
|
98 |
+
|
99 |
+
|
100 |
+
|
101 |
+
# defines how to present argdown-snippet to the model
|
102 |
+
def format_argdown(argdown: str, mask_prob:float=0.0) -> str:
|
103 |
+
# pattern = r"({.*uses: \[[\s\d,]*\]})" # matches yaml metadata inline blocks in inference patterns
|
104 |
+
pattern = r"--\nwith ([^{}]*)({[^{}]*})"
|
105 |
+
matches = re.findall(pattern, argdown)
|
106 |
+
for match in matches:
|
107 |
+
m = match[1].replace('uses:','"uses":')
|
108 |
+
m = m.replace('variant:','"variant":')
|
109 |
+
d = ast.literal_eval(m)
|
110 |
+
subst = ""
|
111 |
+
mask_b = random.random()<mask_prob
|
112 |
+
if mask_b:
|
113 |
+
subst= "?? "
|
114 |
+
elif "variant" in d:
|
115 |
+
subst = "(%s) " % ", ".join(d['variant'])
|
116 |
+
subst = subst + "from " + " ".join(["(%s)" % i for i in d['uses']])
|
117 |
+
if mask_b:
|
118 |
+
argdown = argdown.replace(match[0]+match[1],subst)
|
119 |
+
else:
|
120 |
+
argdown = argdown.replace(match[1],subst)
|
121 |
+
argdown = argdown.replace("\n"," ") # replace line breaks
|
122 |
+
argdown = argdown.lower()
|
123 |
+
return argdown
|
124 |
+
|
125 |
+
#######################################
|
126 |
+
# Parser classes #
|
127 |
+
#######################################
|
128 |
+
|
129 |
+
class AAACParser:
    """Parses the model's plain-text output back into structured records
    (the inverse direction of AAACLayouter)."""

    @staticmethod
    def parse_proposition_block(ad_raw:str,inf_args:dict=None):
        """Split a '(1) text (2) text ...' block into proposition dicts.

        If *inf_args* (scheme/variants/uses of the inference preceding
        this block) is given, it is merged into the FIRST proposition,
        which is that inference's conclusion. Returns [] when the block
        is empty or does not start with a label.
        """
        if not ad_raw:
            return []
        # labels are matched with surrounding spaces, so normalize the start
        if ad_raw[0]!=" ":
            ad_raw = " "+ad_raw
        regex = r" \(([0-9]*)\) " # match labels
        proposition_list = []
        if not re.match(regex,ad_raw):
            return proposition_list
        matches = re.finditer(regex, ad_raw, re.MULTILINE)
        label = -1
        pointer = -1
        for matchNum, match in enumerate(matches, start=1):
            if label>-1:
                # text between the previous label and this one
                proposition = {
                    "text":ad_raw[pointer:match.start()].strip(),
                    "label":label,
                    "uses": [],
                    "scheme": "",
                    "variants":[]
                }
                proposition_list.append(proposition)
            label = int(match.group(1))
            pointer = match.end()
        if label>-1:
            # trailing proposition after the last label
            proposition = {'text':ad_raw[pointer:].strip() ,'label':label,"uses": [],"scheme": "","variants":[]}
            proposition_list.append(proposition)
        if proposition_list and inf_args:
            proposition_list[0].update(inf_args)
        return proposition_list

    @staticmethod
    def parse_variants(variants_raw)->list:
        """Parse '(variant a, variant b)' into ['variant a', 'variant b']; [] if falsy."""
        if not variants_raw:
            return []
        regex = r"(?! )[^\(\),]+"
        matches = re.finditer(regex, str(variants_raw), re.MULTILINE)
        return [match.group() for match in matches]

    @staticmethod
    def parse_uses(uses_raw)->list:
        """Parse '(1), (2)' into [1, 2]; [] if falsy."""
        if not uses_raw:
            return []
        regex = r"\(([0-9]+)\)"
        matches = re.finditer(regex, str(uses_raw), re.MULTILINE)
        return [int(match.group(1)) for match in matches]

    @staticmethod
    def preprocess_ad(ad_raw:str):
        """Normalize raw argdown text before regex parsing (flatten newlines,
        normalize spacing, re-split masked 'with??')."""
        ad_raw = ad_raw.replace("\n"," ")
        # NOTE(review): both arguments render as a single space in this copy
        # of the source — presumably this collapsed double spaces; confirm.
        ad_raw = ad_raw.replace(" "," ")
        ad_raw = ad_raw.replace("with?? ","with ?? ")
        return ad_raw

    @staticmethod
    def parse_argdown_block(ad_raw:str):
        """Parse a full argdown reconstruction into a flat proposition list.

        Inference lines ('-- with <scheme> (<variants>) from (i), (j) --')
        are attached to the proposition that immediately follows them.
        Returns None if any proposition block fails to parse.
        """
        ad_raw = AAACParser.preprocess_ad(ad_raw)
        regex = r"-- with ([^\(\)]*)( \([^-\(\))]*\))? from ([\(\), 0-9]+) --" # matches inference patterns
        proposition_list = []
        inf_args = None
        pointer = 0
        matches = re.finditer(regex, ad_raw, re.MULTILINE)
        for matchNum, match in enumerate(matches, start=1):
            # parse all propositions before inference matched
            propositions = AAACParser.parse_proposition_block(ad_raw[pointer:match.start()],inf_args=inf_args)
            if not propositions:
                return None
            proposition_list.extend(propositions)
            # update pointer and inf_args to be used for parsing next propositions block
            pointer = match.end()
            inf_args = {
                'scheme': match.group(1),
                'variants': AAACParser.parse_variants(match.group(2)),
                'uses': AAACParser.parse_uses(match.group(3))
            }
        if pointer > 0:
            propositions = AAACParser.parse_proposition_block(ad_raw[pointer:],inf_args=inf_args)
            proposition_list.extend(propositions)
        return proposition_list

    @staticmethod
    def parse_statements(statements_raw:str):
        """Parse 'text (ref: (N)) | ...' into [{'text':..., 'ref_reco':...}].

        Returns [] for the literal "None", None on empty/malformed input;
        a masked ref ('??') yields ref_reco None.
        """
        if not statements_raw:
            return None
        statements = []
        if statements_raw.strip()=="None":
            return statements
        list_raw = statements_raw.split(" | ")
        regex = r" \(ref: (?:\(([0-9]+)\)|\?\?)\)$"
        for s in list_raw:
            match = re.search(regex, s)
            if not match:
                return None
            item = {
                'text':s[:match.start()],
                'ref_reco':int(match.group(1)) if match.group(1) else match.group(1)
            }
            statements.append(item)
        return statements

    @staticmethod
    def parse_formalizations(forms_raw:str):
        """Like parse_statements, but each item carries 'form' instead of 'text'
        and stray characters are cleaned from the formula."""
        parsed = AAACParser.parse_statements(forms_raw)
        if not parsed:
            return None
        formalizations = []
        for d in parsed:
            d['form'] = d.pop('text')
            formalizations.append(d)

        # post-process: cleanup stray characters
        # NOTE(review): the replaced strings below look mojibake-mangled in
        # this copy of the source — confirm the intended characters.
        for f in formalizations:
            form = f['form']
            form = form.replace("β","")
            form = form.replace(" "," ")
            form = form.strip()
            f['form'] = form

        return formalizations

    @staticmethod
    def parse_plcd_subs(subs_raw:str):
        """Parse 'F: tall | a: bob' into [{'F': 'tall'}, {'a': 'bob'}].

        Returns [] for the literal "None", None on empty/malformed input.
        """
        if not subs_raw:
            return None
        plcd_subs = []
        if subs_raw.strip()=="None":
            return plcd_subs
        list_raw = subs_raw.split(" | ")
        regex = r"^(..?):\s(.+)"
        for s in list_raw:
            match = re.search(regex, s)
            if not match:
                return None
            k = match.group(1)
            # comment out reverse substitution
            #if k in AAACLayouter.PRED_CHARS:
            #    k = 'F'+str(1+AAACLayouter.PRED_CHARS.index(k))
            #if k in AAACLayouter.ENT_CHARS:
            #    k = 'a'+str(1+AAACLayouter.ENT_CHARS.index(k))
            item = {k: match.group(2)}
            plcd_subs.append(item)
        return plcd_subs
|
266 |
+
|
267 |
+
|
268 |
+
|
269 |
+
|
270 |
+
#######################################
|
271 |
+
# Logic Evaluator Class #
|
272 |
+
#######################################
|
273 |
+
|
274 |
+
class AAACLogicEvaluator():
    """Checks reconstructed arguments against natural-language scheme
    templates (via generated regexes) and checks deductive validity
    (via z3).

    NOTE(review): depends on third-party ``pyparsing`` (pp) and ``z3``;
    the documentation below is grounded in the visible code only.
    """

    def __init__(
        self,
        nl_schemes = None,  # list of dicts with 'scheme' (template strings), placeholder lists, 'base_scheme_group', 'scheme_variant'
        domains = None,     # config dict with a 'domains' list, each with 'id' and optional 'paraphrases'
        **kwargs
    ):
        # Compile the natural-language scheme templates into regexes.
        # NOTE(review): nl_schemes.copy() is a shallow copy — the per-item
        # dicts (and thus the caller's data) are mutated in place below.
        self.nl_schemes_re = []
        if nl_schemes:
            self.nl_schemes_re = nl_schemes.copy()
            for item in self.nl_schemes_re:
                item['scheme'] = [self.construct_regex(s) for s in item['scheme']]

        # construct de-paraphrasing rules from domain-config-file
        self.de_paraphrasing_rules = {}
        if domains:
            for domain in domains.get('domains'):
                rules = {}
                for k,v in domain.get("paraphrases",{}).items():
                    rules.update({repl.lower():k.lower() for repl in v}) # all paraphrasing rules are cast as lower case
                self.de_paraphrasing_rules[domain['id']] = rules


    def construct_regex(self,statement:str):
        """Turn a scheme template (with ``${F}``-style placeholders) into a
        regex whose named groups capture the placeholder substitutions.

        Group names are '<placeholder-letter><match-index>'. The optional
        leading ' a ' before a placeholder becomes ' an? '.
        """
        regex = r"( a )?\$\{([A-Za-z])\}"
        regex_template = ""
        pointer = 0
        matches = re.finditer(regex, statement, re.MULTILINE)
        for matchNum, match in enumerate(matches):
            # literal text before the placeholder, lower-cased
            regex_template += statement[pointer:match.start()].lower()
            # up to 5 chars of following literal text, used as a lookahead below
            neg_la = statement[match.end():match.end()+5] if match.end()+5<=len(statement) else statement[match.end():]
            if match.group(1):
                regex_template += " an? "
            # NOTE(review): '.*(?!%s)*' is an unusual construct (a quantified
            # lookahead) — confirm it behaves as intended for all templates.
            regex_template += "(?P<%s%s>.*(?!%s)*)"%(match.group(2),matchNum,neg_la)
            pointer = match.end()
        regex_template += statement[pointer:].lower()
        return regex_template


    def parse_inference_as_scheme(
        self,
        argument:list = None,      # statements, conclusion last
        nl_scheme_re:dict = None   # one compiled scheme from self.nl_schemes_re
    ):
        """Return True iff *argument* can be matched to *nl_scheme_re* with a
        placeholder-consistent premise-to-template assignment."""
        # recursively try to match premises to scheme, i.e. recursively construct a consistent mapping
        # mapping maps sentences in
        # matching contains all matches found so far
        def match_premises(matching:list=None, mapping:dict=None):
            #print("match_premises:" + str(mapping))
            unmapped_formulas = [i for i in range(len(argument)) if not i in mapping.keys()]
            unmapped_premises = [j for j in range(len(argument)) if not j in mapping.values()]
            for i in unmapped_formulas:
                for j in unmapped_premises:
                    try:
                        match = re.match(nl_scheme_re['scheme'][i], argument[j])
                    except IndexError:
                        # scheme has fewer template slots than the argument has statements
                        match=False
                    if match:
                        matching[i]=match
                        mapping[i]=j
                        if any(m==None for m in matching):
                            # still unmatched slots: recurse to extend the mapping
                            full_match = match_premises(
                                matching = matching,
                                mapping = mapping
                            )
                        else:
                            full_match = matching_consistent(matching)
                        if full_match:
                            return True
                    else:
                        full_match = False

            return full_match

        # check whether a mapping is consistent with respect to placeholders
        def matching_consistent(matching:list):
            if any(m==None for m in matching):
                return False
            def group_by_name(match=None, group_name=None):
                # None when the template did not define this named group
                try:
                    g=match.group(group_name)
                except IndexError:
                    g=None
                return g
            all_plcds = (nl_scheme_re["predicate-placeholders"]+nl_scheme_re["entity-placeholders"])
            for plcd in all_plcds:
                all_subst = []
                # group names are '<placeholder><i>' for i in 0..9 (see construct_regex)
                for i in range(10):
                    group_name = plcd+str(i)
                    subst = [group_by_name(match=match, group_name=group_name) for match in matching]
                    subst = [x for x in subst if x != None]
                    all_subst.extend(subst)
                # a placeholder must receive the same substitution everywhere
                if len(set(all_subst))>1:
                    return False
            return True

        # the conclusion (last statement) must match the last template slot
        c_match = re.match(nl_scheme_re['scheme'][-1], argument[-1])
        if c_match:
            # NOTE(review): 'g' is computed but never used — looks like
            # leftover debugging code.
            try:
                g=c_match.group("F")
            except IndexError:
                g="None"

        if c_match:
            full_match = match_premises(
                matching = [None]*(len(argument)-1)+[c_match],
                mapping = {len(argument)-1:len(argument)-1}
            )
        else:
            full_match = False

        return full_match


    def parse_inference_as_base_scheme(
        self,
        argument:list = None,
        base_scheme_group:str = None,
        domain:str = None
    ):
        """Try every compiled scheme in *base_scheme_group* against *argument*.

        Returns (matches, variant): matches is truthy iff some scheme
        matched; variant is that scheme's 'scheme_variant' (else None).
        """
        variant = None
        matches = False
        # make the entire argument lower case
        argument = [item.lower() for item in argument]

        argument = self.de_paraphrase(argument, domain=domain) if domain else argument
        for nl_scheme_re in self.nl_schemes_re:
            if nl_scheme_re['base_scheme_group'] == base_scheme_group:
                matches = self.parse_inference_as_scheme(
                    argument = argument,
                    nl_scheme_re = nl_scheme_re
                )
                if matches:
                    variant = nl_scheme_re['scheme_variant']
                    break

        return matches, variant


    def de_paraphrase(self, argument:list = None, domain:str = None):
        """Replace domain-specific paraphrases with their canonical wording.

        Mutates and returns *argument*; a no-op for unknown domains.
        """
        rules = {}
        if domain in self.de_paraphrasing_rules:
            rules = self.de_paraphrasing_rules[domain]
        for i,statement in enumerate(argument):
            s = statement
            for k,v in rules.items():
                s = s.replace(k,v)
            argument[i] = s
        return argument


    def parse_string_formula(self, formula:str):
        """Parse a propositional/monadic formula string (atoms like 'F x',
        operators not/&/v/->/<->) into a pyparsing tree; None on failure."""
        atom = pp.Regex("[A-Z]\s[a-u|w-z]").setName("atom")
        expr = pp.infixNotation(atom,[
            ("not", 1, pp.opAssoc.RIGHT, ),
            ("&", 2, pp.opAssoc.LEFT, ),
            ("v", 2, pp.opAssoc.LEFT, ),
            ("->", 2, pp.opAssoc.LEFT, ),
            ("<->", 2, pp.opAssoc.LEFT, )
        ])
        try:
            parsed = expr.parseString(formula,parseAll=True)[0]
        except pp.ParseException as e:
            parsed = None
        return parsed


    def c_bf(self,parse_tree):
        """Recursively compile a parsed formula into a z3 boolean expression.

        Atoms 'F x' become unary boolean functions applied to constants;
        '<->' is expanded into a conjunction of two implications.
        """
        if not parse_tree:
            return None
        # NOTE(review): these dicts are re-created on every recursive call,
        # so no caching actually happens across subtrees; this still works
        # because z3 identifies declarations by name.
        functions = {}
        constants = {}
        Object = z3.DeclareSort('Object')
        bin_op = {
            "&": z3.And,
            "v": z3.Or,
            "->": z3.Implies
        }
        pt = parse_tree
        if pt[0]=="not":
            return z3.Not(self.c_bf(pt[1]))
        if pt[1]=="<->":
            return z3.And(z3.Implies(self.c_bf(pt[0]),self.c_bf(pt[2])),z3.Implies(self.c_bf(pt[2]),self.c_bf(pt[0])))
        if pt[1] in bin_op.keys():
            return bin_op[pt[1]](self.c_bf(pt[0]),self.c_bf(pt[2]))
        # atom
        pred = parse_tree[0]
        if not pred in functions.keys(): # add predicate to dict if necessary
            functions[pred] = z3.Function(pred, Object, z3.BoolSort())
        const = parse_tree[-1]
        if not const in constants.keys(): # add function to dict if necessary
            # NOTE(review): the constant is stored in `functions` rather than
            # `constants` (which stays empty) — works because predicate and
            # constant names don't collide, but looks like a slip; confirm.
            functions[const] = z3.Const(const, Object)
        return functions[pred](functions[const])

    def to_z3(self,str_f:str):
        """Convert a formula string to a z3 expression.

        A leading '(x):' prefix is treated as universal quantification.
        Returns None when the string is empty or unparseable.
        """
        if not str_f:
            return None
        f = str_f.strip()
        if f[:4] == "(x):":
            Object = z3.DeclareSort('Object')
            x = z3.Const('x', Object)
            parsed = self.parse_string_formula(f[4:].strip())
            if parsed:
                return (z3.ForAll(x,self.c_bf(parsed)))
            return None
        parsed = self.parse_string_formula(f)
        if parsed:
            return self.c_bf(parsed)
        return None

    def check_deductive_validity(self,scheme:list):
        """Check whether the premises (all but the last formula string)
        entail the conclusion (last), by testing unsatisfiability of
        premises + negated conclusion. Returns True/False, or None if any
        formula fails to translate to z3.
        """
        premises = [self.to_z3(f) for f in scheme[:-1]] # premises
        conclusion = self.to_z3(scheme[-1]) # conclusion
        #print(theory)
        if any(p==None for p in premises) or (conclusion==None):
            return None
        theory = premises # premises
        theory.append(z3.Not(conclusion)) # negation of conclusion
        s = z3.Solver()
        s.add(theory)
        #print(s.check())
        valid = s.check()==z3.unsat
        return valid
|
499 |
+
|
500 |
+
|
501 |
+
#######################################
|
502 |
+
# Main eval function #
|
503 |
+
#######################################
|
504 |
+
|
505 |
+
|
506 |
+
def ad_valid_syntax(argdown):
    """Return 1 iff the parsed argdown labels its statements consecutively
    from 1 and contains no bare "--" statement; 0 otherwise (incl. no input)."""
    if not argdown:
        return 0
    labels_consecutive = all(prop['label'] == idx + 1 for idx, prop in enumerate(argdown))
    no_separator_text = all(prop['text'] != "--" for prop in argdown)
    return int(labels_consecutive and no_separator_text)
|
514 |
+
|
515 |
+
def ad_last_st_concl(argdown):
    """Return 1 iff the final statement is an inferred conclusion (its 'uses'
    list is non-empty); 0 otherwise (incl. no input)."""
    if not argdown:
        return 0
    return 1 if len(argdown[-1]['uses']) > 0 else 0
|
518 |
+
|
519 |
+
# do all statements referenced in inference exist and do they occur before inference is drawn?
|
520 |
+
def used_prem_exist(argdown):
    """Return 1 iff every label referenced in an inference occurs strictly
    earlier in the reconstruction; 0 on the first violation; None without input."""
    if not argdown:
        return None
    labels_so_far = []
    for prop in argdown:
        referenced = prop['uses']
        if referenced and not all(label in labels_so_far for label in referenced):
            return 0
        labels_so_far.append(prop['label'])
    return 1
|
531 |
+
|
532 |
+
def ad_redundant_prem(argdown):
    """Return 1 iff two (non-inferred) premises share identical text;
    0 otherwise; None without input."""
    if not argdown:
        return None
    premise_texts = [prop['text'].strip() for prop in argdown if not prop['uses']]
    has_duplicates = len(set(premise_texts)) != len(premise_texts)
    return int(has_duplicates)
|
537 |
+
|
538 |
+
def ad_petitio(argdown):
    """Return 1 iff some premise text equals some conclusion text (a petitio
    principii); 0 otherwise; None without input."""
    if not argdown:
        return None
    premise_texts = {prop['text'].strip() for prop in argdown if not prop['uses']}
    conclusion_texts = {prop['text'].strip() for prop in argdown if prop['uses']}
    return int(bool(premise_texts & conclusion_texts))
|
544 |
+
|
545 |
+
def prem_non_used(argdown):
    """Return the number of statements never referenced by any inference,
    minus one (the final conclusion is legitimately unreferenced);
    None without input."""
    if not argdown:
        return None
    referenced = {label for prop in argdown if prop['uses'] for label in prop['uses']}
    unreferenced = [prop['label'] for prop in argdown if prop['label'] not in referenced]
    return len(unreferenced) - 1
|
550 |
+
|
551 |
+
|
552 |
+
|
553 |
+
#######################################
|
554 |
+
# Evaluating reason_statements #
|
555 |
+
#######################################
|
556 |
+
|
557 |
+
def s_valid_syntax(output:list,raw:str=""):
    """Return 1 iff parsing succeeded (output is not None) or the raw text is
    the literal "None"; 0 otherwise."""
    parsed_ok = output is not None
    literal_none = raw.strip() == "None"
    return 1 if (parsed_ok or literal_none) else 0
|
559 |
+
|
560 |
+
def s_not_verb_quotes(output:list,source):
    """Count statements whose text is NOT a verbatim substring of *source*."""
    return sum(1 for stmt in output if stmt['text'] not in source)
|
563 |
+
|
564 |
+
def s_ord_me_subsseq(output:list,source):
    """Check that every statement's text occurs verbatim in *source*, with
    occurrences consumed as they are matched.

    Each matched text has its first occurrence removed from the remaining
    source, so repeated statements require repeated occurrences (the
    matched spans are mutually exclusive).

    Returns True on success, False as soon as a text cannot be found.
    """
    # (removed a dead no-op `source = source` statement)
    check = True
    for reason in output:
        text = reason['text']
        check = check and (text in source)
        if not check: return check
        # consume the matched span so it cannot be re-used
        source = source.replace(text,"",1)
    return check
|
573 |
+
|
574 |
+
|
575 |
+
|
576 |
+
#######################################
|
577 |
+
# Evaluating r-c-a consistency #
|
578 |
+
#######################################
|
579 |
+
|
580 |
+
# test: no reason statements is contained in a conclusion statement and vice versa
|
581 |
+
# test: no reason statement is contained in a conclusion statement and vice versa
def reason_concl_mutually_exclusive(reasons,concl):
    """Return False as soon as any reason text contains — or is contained
    in — any conclusion text; True otherwise."""
    for reason in reasons:
        for conclusion in concl:
            r_text = reason['text']
            c_text = conclusion['text']
            if c_text in r_text or r_text in c_text:
                return False
    return True
|
588 |
+
|
app.py
ADDED
@@ -0,0 +1,464 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Demo for T5 trained on multi-angular AAAC
|
2 |
+
|
3 |
+
import textwrap
|
4 |
+
import re
|
5 |
+
|
6 |
+
import streamlit as st
|
7 |
+
from spacy import displacy
|
8 |
+
import graphviz
|
9 |
+
import seaborn as sns
|
10 |
+
|
11 |
+
import aaac_util as aaac
|
12 |
+
|
13 |
+
from huggingface_hub.inference_api import InferenceApi
|
14 |
+
|
15 |
+
|
16 |
+
# Widget placeholder strings mapped to the empty string — presumably used
# to treat untouched UI text fields as empty input; confirm against the
# input-handling code further down in this app.
BOILER_PLATE = {
    "Enter your story" : "",
    "Enter question (optional)": "",
}
|
20 |
+
|
21 |
+
# Generation modes: each mode feeds the 'from' fields of an AAAC record to
# the model and asks it to generate the 'to' field. The 'id' letters encode
# the fields (s=argument_source, r=reason_statements, j=conclusion_statements,
# a=argdown_reconstruction, p=premises, c=conclusion, f=premises_formalized,
# o=conclusion_formalized, k=plcd_subs).
MODES = [
    # informal analysis
    {'id':'s => a','from':['argument_source'],'to':'argdown_reconstruction'},
    {'id':'s+r => a','from':['argument_source','reason_statements'],'to':'argdown_reconstruction'},
    {'id':'s+j => a','from':['argument_source','conclusion_statements'],'to':'argdown_reconstruction'},
    {'id':'r+j => a','from':['reason_statements','conclusion_statements'],'to':'argdown_reconstruction'},
    {'id':'s+r+j => a','from':['argument_source','reason_statements','conclusion_statements'],'to':'argdown_reconstruction'},
    {'id':'s => r','from':['argument_source'],'to':'reason_statements'},
    {'id':'s+a => r','from':['argument_source','argdown_reconstruction'],'to':'reason_statements'},
    {'id':'s+j => r','from':['argument_source','conclusion_statements'],'to':'reason_statements'},
    {'id':'s => j','from':['argument_source'],'to':'conclusion_statements'},
    {'id':'s+a => j','from':['argument_source','argdown_reconstruction'],'to':'conclusion_statements'},
    {'id':'s+r => j','from':['argument_source','reason_statements'],'to':'conclusion_statements'},
    # extract premises and conclusions
    {'id':'a => c','from':['argdown_reconstruction'],'to':'conclusion'},
    {'id':'a => p','from':['argdown_reconstruction'],'to':'premises'},
    # formalize
    {'id':'p => f','from':['premises'],'to':'premises_formalized'},
    {'id':'p+c+o => f','from':['premises','conclusion','conclusion_formalized'],'to':'premises_formalized'},
    {'id':'c => o','from':['conclusion'],'to':'conclusion_formalized'},
    {'id':'c+p+f => o','from':['conclusion','premises','premises_formalized'],'to':'conclusion_formalized'},
    {'id':'p+f => k','from':['premises','premises_formalized'],'to':'plcd_subs'},
    {'id':'c+o => k','from':['conclusion','conclusion_formalized'],'to':'plcd_subs'},
    {'id':'p+f+c+o => k','from':['premises','premises_formalized','conclusion','conclusion_formalized'],'to':'plcd_subs'},
    # re-reconstruct argument
    {'id':'f+k => p','from':['premises_formalized','plcd_subs'],'to':'premises'},
    {'id':'o+k => c','from':['conclusion_formalized','plcd_subs'],'to':'conclusion'},
    {'id':'p+c => a','from':['premises','conclusion'],'to':'argdown_reconstruction'}
]
|
50 |
+
|
51 |
+
TEST_DATA = [
|
52 |
+
{"title":"Allergies (AAAC, 2 steps, 2 distractors, 1 implicit premise, 1 implicit intermediary conclusion)","argument_source":"Whoever is a sufferer of allergy to mango is not a sufferer of allergy to sesame or a sufferer of allergy to carrot. And no sufferer of allergy to carrot is hypersensitive to mango. Consequently, every sufferer of allergy to mango reacts allergically to turkey. Yet someone who is not a sufferer of allergy to mango and a sufferer of allergy to cheese is a sufferer of allergy to ginger and a sufferer of allergy to pepper. Plus, every person who is not both a sufferer of allergy to maize and a sufferer of allergy to mustard is a sufferer of allergy to cinnamon or a sufferer of allergy to oat.","argdown_reconstruction":"(1) If someone is a sufferer of allergy to mango, then they are a sufferer of allergy to carrot, or not a sufferer of allergy to sesame.\n(2) If someone is a sufferer of allergy to carrot, then they are not a sufferer of allergy to mango.\n--\nwith generalized disjunctive syllogism (transposition, negation variant) from (1), (2)\n--\n(3) If someone is a sufferer of allergy to mango, then they are not a sufferer of allergy to sesame.\n(4) If someone is a sufferer of allergy to mango, then they are a sufferer of allergy to sesame or a sufferer of allergy to turkey.\n--\nwith generalized disjunctive syllogism from (3), (4)\n--\n(5) If someone is a sufferer of allergy to mango, then they are a sufferer of allergy to turkey.","reason_statements":[{"text":"Whoever is a sufferer of allergy to mango is not a sufferer of allergy to sesame or a sufferer of allergy to carrot","starts_at":0,"ref_reco":1},{"text":"no sufferer of allergy to carrot is hypersensitive to mango","starts_at":121,"ref_reco":2}],"conclusion_statements":[{"text":"every sufferer of allergy to mango reacts allergically to turkey","starts_at":196,"ref_reco":5}],"premises":[{"ref_reco":1,"text":"If someone is a sufferer of allergy to mango, then they are a sufferer of 
allergy to carrot, or not a sufferer of allergy to sesame.","explicit":"true"},{"ref_reco":2,"text":"If someone is a sufferer of allergy to carrot, then they are not a sufferer of allergy to mango.","explicit":"true"},{"ref_reco":4,"text":"If someone is a sufferer of allergy to mango, then they are a sufferer of allergy to sesame or a sufferer of allergy to turkey.","explicit":"false"}],"premises_formalized":[{"form":"(x): ${F1}x -> (${F4}x v \u00ac${F2}x)","ref_reco":1},{"form":"(x): ${F4}x -> \u00ac${F1}x","ref_reco":2},{"form":"(x): ${F1}x -> (${F2}x v ${F3}x)","ref_reco":4}],"conclusion":[{"ref_reco":5,"text":"If someone is a sufferer of allergy to mango, then they are a sufferer of allergy to turkey."}],"conclusion_formalized":[{"form":"(x): ${F1}x -> ${F3}x","ref_reco":5}],"intermediary_conclusions_formalized":[{"form":"(x): ${F1}x -> \u00ac${F2}x","ref_reco":3}],"intermediary_conclusions":[{"ref_reco":3,"text":"If someone is a sufferer of allergy to mango, then they are not a sufferer of allergy to sesame."}],"distractors":["Every person who is not both a sufferer of allergy to maize and a sufferer of allergy to mustard is a sufferer of allergy to cinnamon or a sufferer of allergy to oat.","Someone who is not a sufferer of allergy to mango and a sufferer of allergy to cheese is a sufferer of allergy to ginger and a sufferer of allergy to pepper."],"id":"8c2c3329-cab8-4bd1-b4e7-3ff26506be9d","predicate_placeholders":["F1","F2","F3","F4"],"entity_placeholders":[],"steps":2,"n_premises":3,"n_distractors":2,"base_scheme_groups":["generalized disjunctive syllogism"],"scheme_variants":["transposition","negation variant"],"domain_id":"allergies","domain_type":"persons","plcd_subs":{"F1":"sufferer of allergy to mango","F2":"sufferer of allergy to sesame","F3":"sufferer of allergy to turkey","F4":"sufferer of allergy to 
carrot"},"argdown_index_map":{"s0c":5,"s0p0":4,"s1c":3,"s1p1":2,"s2c":2,"s2p0":1,"s1p0":1},"presentation_parameters":{"resolve_steps":[1],"direction":"forward","implicit_conclusion":"false","implicit_premise":"true","redundancy_frequency":0.1,"drop_conj_frequency":0.1,"start_sentence":[0,2]}},
|
53 |
+
{"title":"Families (AAAC, 3 steps, 0 distractors, 1 implicit premise)","argument_source":"A person who is not a nephew of Richard is a half-brother of Lance or a son of Jeff, and vice versa. Hence, somebody who is not a nephew of Richard is a half-brother of Lance or a son of Jeff. We may conclude that nobody is neither a nephew of Richard nor a son of Jeff. All this entails that a person who is not a great-grandfather of David is a son of Jeff, owing to the fact that someone who is not a great-grandfather of David is not a nephew of Richard.","argdown_reconstruction":"(1) If, and only if, someone is not a nephew of Richard, then they are a half-brother of Lance or a son of Jeff.\n--\nwith generalized biconditional elimination (negation variant, complex variant) from (1)\n--\n(2) If someone is not a nephew of Richard, then they are a half-brother of Lance or a son of Jeff.\n(3) If someone is a half-brother of Lance, then they are a nephew of Richard.\n--\nwith generalized disjunctive syllogism (transposition, negation variant) from (2), (3)\n--\n(4) If someone is not a nephew of Richard, then they are a son of Jeff.\n(5) If someone is not a great-grandfather of David, then they are not a nephew of Richard.\n--\nwith hypothetical syllogism (negation variant) from (4), (5)\n--\n(6) If someone is not a great-grandfather of David, then they are a son of Jeff.","reason_statements":[{"text":"A person who is not a nephew of Richard is a half-brother of Lance or a son of Jeff, and vice versa","starts_at":0,"ref_reco":1},{"text":"someone who is not a great-grandfather of David is not a nephew of Richard","starts_at":383,"ref_reco":5}],"conclusion_statements":[{"text":"somebody who is not a nephew of Richard is a half-brother of Lance or a son of Jeff","starts_at":108,"ref_reco":2},{"text":"nobody is neither a nephew of Richard nor a son of Jeff","starts_at":214,"ref_reco":4},{"text":"a person who is not a great-grandfather of David is a son of 
Jeff","starts_at":293,"ref_reco":6}],"premises":[{"ref_reco":1,"text":"If, and only if, someone is not a nephew of Richard, then they are a half-brother of Lance or a son of Jeff.","explicit":"true"},{"ref_reco":3,"text":"If someone is a half-brother of Lance, then they are a nephew of Richard.","explicit":"false"},{"ref_reco":5,"text":"If someone is not a great-grandfather of David, then they are not a nephew of Richard.","explicit":"true"}],"premises_formalized":[{"form":"(x): \u00ac${F2}x <-> (${F4}x v ${F3}x)","ref_reco":1},{"form":"(x): ${F4}x -> ${F2}x","ref_reco":3},{"form":"(x): \u00ac${F1}x -> \u00ac${F2}x","ref_reco":5}],"conclusion":[{"ref_reco":6,"text":"If someone is not a great-grandfather of David, then they are a son of Jeff."}],"conclusion_formalized":[{"form":"(x): \u00ac${F1}x -> ${F3}x","ref_reco":6}],"intermediary_conclusions_formalized":[{"form":"(x): \u00ac${F2}x -> (${F4}x v ${F3}x)","ref_reco":2},{"form":"(x): \u00ac${F2}x -> ${F3}x","ref_reco":4}],"intermediary_conclusions":[{"ref_reco":2,"text":"If someone is not a nephew of Richard, then they are a half-brother of Lance or a son of Jeff."},{"ref_reco":4,"text":"If someone is not a nephew of Richard, then they are a son of Jeff."}],"distractors":[],"id":"5812d3a0-05d0-4e50-af62-416205f6ea22","predicate_placeholders":["F1","F2","F3","F4"],"entity_placeholders":[],"steps":3,"n_premises":3,"n_distractors":0,"base_scheme_groups":["hypothetical syllogism","generalized biconditional elimination","generalized disjunctive syllogism"],"scheme_variants":["transposition","negation variant","complex variant"],"domain_id":"male_relatives","domain_type":"persons","plcd_subs":{"F1":"great-grandfather of David","F2":"nephew of Richard","F3":"son of Jeff","F4":"half-brother of 
Lance"},"argdown_index_map":{"s0c":6,"s0p0":5,"s1c":4,"s1p1":3,"s2c":2,"s2p0":1},"presentation_parameters":{"resolve_steps":[],"direction":"forward","implicit_conclusion":"false","implicit_premise":"true","redundancy_frequency":0.1,"drop_conj_frequency":0.1,"start_sentence":[0,1]}},
|
54 |
+
{"title":"Football (AAAC, 4 steps, 0 distractors, 0 implicit premises, 3 implicit intermediary conclusions, implicit final conclusion)", "argument_source":"If, and only if, someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk. And everybody who hasn't expert knowledge about Kilmarnock FC doesn't criticize Besiktas JK, and no expert of Kilmarnock FC has expert knowledge about Tottenham Hotspur, and vice versa.","argdown_reconstruction":"(1) If, and only if, someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk.\n--\nwith generalized biconditional elimination (negation variant, complex variant) from (1)\n--\n(2) If someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk.\n(3) If, and only if, someone is an expert of Kilmarnock FC, then they are not an expert of Tottenham Hotspur.\n--\nwith generalized biconditional elimination (negation variant) from (3)\n--\n(4) If someone is an expert of Kilmarnock FC, then they are not an expert of Tottenham Hotspur.\n(5) If someone is not an expert of Kilmarnock FC, then they are not a critic of Besiktas JK.\n--\nwith hypothetical syllogism (transposition, negation variant) from (4), (5)\n--\n(6) If someone is a critic of Besiktas JK, then they are not an expert of Tottenham Hotspur.\n--\nwith generalized disjunctive syllogism from (2), (6)\n--\n(7) If someone is a critic of Besiktas JK, then they are a friend of KRC Genk.","reason_statements":[{"text":"If, and only if, someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk","starts_at":0,"ref_reco":1},{"text":"everybody who hasn't expert knowledge about Kilmarnock FC doesn't criticize Besiktas JK","starts_at":127,"ref_reco":5},{"text":"no expert of Kilmarnock FC has expert knowledge about Tottenham Hotspur, and vice 
versa","starts_at":220,"ref_reco":3}],"conclusion_statements":[],"premises":[{"ref_reco":1,"text":"If, and only if, someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk.","explicit":"true"},{"ref_reco":3,"text":"If, and only if, someone is an expert of Kilmarnock FC, then they are not an expert of Tottenham Hotspur.","explicit":"true"},{"ref_reco":5,"text":"If someone is not an expert of Kilmarnock FC, then they are not a critic of Besiktas JK.","explicit":"true"}],"premises_formalized":[{"form":"(x): ${F1}x <-> (${F2}x v ${F3}x)","ref_reco":1},{"form":"(x): ${F4}x <-> \u00ac${F2}x","ref_reco":3},{"form":"(x): \u00ac${F4}x -> \u00ac${F1}x","ref_reco":5}],"conclusion":[{"ref_reco":7,"text":"If someone is a critic of Besiktas JK, then they are a friend of KRC Genk."}],"conclusion_formalized":[{"form":"(x): ${F1}x -> ${F3}x","ref_reco":7}],"intermediary_conclusions_formalized":[{"form":"(x): ${F1}x -> (${F2}x v ${F3}x)","ref_reco":2},{"form":"(x): ${F4}x -> \u00ac${F2}x","ref_reco":4},{"form":"(x): ${F1}x -> \u00ac${F2}x","ref_reco":6}],"intermediary_conclusions":[{"ref_reco":2,"text":"If someone is a critic of Besiktas JK, then they are an expert of Tottenham Hotspur or a friend of KRC Genk."},{"ref_reco":4,"text":"If someone is an expert of Kilmarnock FC, then they are not an expert of Tottenham Hotspur."},{"ref_reco":6,"text":"If someone is a critic of Besiktas JK, then they are not an expert of Tottenham Hotspur."}],"distractors":[],"id":"ead34d89-af68-4add-bb62-caff9043c90f","predicate_placeholders":["F1","F2","F3","F4"],"entity_placeholders":[],"steps":4,"n_premises":3,"n_distractors":0,"base_scheme_groups":["hypothetical syllogism","generalized biconditional elimination","generalized disjunctive syllogism"],"scheme_variants":["transposition","negation variant","complex variant"],"domain_id":"football_fans","domain_type":"persons","plcd_subs":{"F1":"critic of Besiktas JK","F2":"expert of Tottenham 
Hotspur","F3":"friend of KRC Genk","F4":"expert of Kilmarnock FC"},"argdown_index_map":{"s0c":7,"s1c":2,"s1p1":2,"s2c":6,"s2p0":5,"s1p0":1,"s3c":4,"s3p0":3},"presentation_parameters":{"resolve_steps":[1,2,3],"direction":"backward","implicit_conclusion":"true","implicit_premise":"false","redundancy_frequency":0.1,"drop_conj_frequency":0.1,"start_sentence":[0,2]}}
|
55 |
+
]
|
56 |
+
|
57 |
+
# Predefined generative chains (reconstruction strategies). Each entry maps a
# human-readable id to an ordered list of mode ids ('inputs => output', using
# the single-letter field codes: s=source, a=argdown, r=reasons, j=conjectures,
# p=premises, c=conclusion, o=conclusion formalization, f=premise
# formalization, k=keys). The modes are run in sequence by run_model().
GEN_CHAINS = [
    {"id":"straight (with formalization)","modes":['s => a','s => r','s => j','a => c','a => p','c => o','p+c+o => f']},
    {"id":"straight (without formalization)","modes":['s => a','s => r','s => j']},
    {"id":"hermeneutic cycle 1","modes":['s => a','s+a => r','s+a => j','r+j => a','a => c','a => p','c => o','p+c+o => f']},
    {"id":"hermeneutic cycle 2","modes":['s => a','s+a => r','s+a => j','s+r+j => a','s+a => r','s+a => j','s+r+j => a','a => c','a => p','c => o','p+c+o => f']},
    {"id":"logical streamlining","modes":['s => a','a => p','a => c','c => o','c+o => k','o+k => c','p+c => a','a => c','a => p','c => o','p+c+o => f']},
]
|
64 |
+
|
65 |
+
# Generation parameters forwarded with every call to the hosted inference API.
INFERENCE_PARAMS = {
    'max_length':450,
    'clean_up_tokenization_spaces': False
}
|
69 |
+
|
70 |
+
# Scrollable, bordered container used to embed the displacy HTML in the page.
HTML_WRAPPER = """<div style="overflow-x: auto; border: 1px solid #e6e9ef; border-radius: 0.25rem; padding: 1rem; margin-bottom: 2.5rem">{}</div>"""

# NOTE(review): not referenced anywhere in this chunk — presumably intended
# for a cache configuration; verify against the rest of the file.
CACHE_SIZE = 10000
|
73 |
+
|
74 |
+
def params(config):
    # Unimplemented stub; kept so callers importing this name do not break.
    # NOTE(review): no caller is visible in this chunk — candidate for removal.
    pass
|
76 |
+
|
77 |
+
|
78 |
+
@st.cache(allow_output_mutation=True)
def build_inference_api():
    """Create (and cache via st.cache) the Hugging Face Inference API client
    for the debatelab/argument-analyst model.

    The API token is read from Streamlit secrets (key 'api_token').
    """
    api_token = st.secrets['api_token']
    return InferenceApi(repo_id="debatelab/argument-analyst", token=api_token)
|
85 |
+
#config.max_answer = 450
|
86 |
+
#config.max_seq_len = 450
|
87 |
+
#config.no_repeat_ngram_size = 0
|
88 |
+
|
89 |
+
|
90 |
+
|
91 |
+
@st.cache(allow_output_mutation=True)
def aaac_fields():
    """Return the set of all AAAC field names consumed by any mode in MODES.

    :returns: set of field-name strings (e.g. 'argument_source')

    Fix: the original built the set via repeated list concatenation (quadratic)
    and wrapped the result in set(sorted(...)) — sorting before constructing a
    set is a no-op, since sets are unordered.
    """
    fields = set()
    for m in MODES:
        # m['from'] lists the input fields this mode consumes
        fields.update(m['from'])
    return fields
|
97 |
+
|
98 |
+
# defines how to present reason and conclusion statements to the model
@st.cache(allow_output_mutation=True)
def format_statements_list(statements: list) -> str:
    """Render statement dicts as a single ' | '-separated prompt string.

    :param statements: list of dicts with 'text' and 'ref_reco' keys
    :returns: "None" for an empty list, otherwise e.g.
        "some text (ref: (1)) | other text (ref: (2))"
    """
    if not statements:
        return "None"
    return " | ".join(
        "%s (ref: (%s))" % (item['text'], item['ref_reco'])
        for item in statements
    )
|
106 |
+
|
107 |
+
# construct inference graph
#@st.cache(allow_output_mutation=True)
def get_inference_graph(argdown_parsed,colors):
    """Build a graphviz Digraph visualizing the reconstructed argument.

    :param argdown_parsed: list of statement dicts with keys 'text', 'label',
        'uses' (labels of premises used), and — for inferred statements —
        'scheme' and 'variants'
    :param colors: map from 'P<label>'/'C<label>' to background hex colors
    :returns: graphviz.Digraph with one HTML-label node per statement and an
        edge from every used premise to the statement inferred from it
    """
    # HTML-like graphviz label for a premise (leaf) node.
    premise_template = """<
<TABLE BORDER="0" COLOR="#444444" CELLPADDING="10" CELLSPACING="2">
<TR><TD BORDER="0" BGCOLOR="{bgcolor}" STYLE="rounded"><FONT FACE="sans serif" POINT-SIZE="12"><B>({label})</B> {text}</FONT></TD></TR>
</TABLE>
>"""

    # HTML-like label for an inferred statement: inference info row on top,
    # statement row below.
    conclusion_template = """<
<TABLE BORDER="0" COLOR="#444444" CELLPADDING="10" CELLSPACING="2">
<TR><TD BORDER="1" BGCOLOR="white" CELLPADDING="4"><FONT FACE="sans serif" POINT-SIZE="10">{inference}</FONT></TD></TR>
<TR><TD BORDER="0" BGCOLOR="{bgcolor}" STYLE="rounded"><FONT FACE="sans serif" POINT-SIZE="12"><B>({label})</B> {text}</FONT></TD></TR>
</TABLE>
>"""

    g = graphviz.Digraph()
    g.attr(ratio="compress", size="6,10", orientation='portrait',overlay="compress")
    for item in argdown_parsed:
        # Wrap to 30 chars; the dummy "(X) " prefix reserves space for the
        # label shown in the template and is stripped again by the [4:] slice.
        text = textwrap.wrap("(X) "+item['text'], width=30)
        text='<BR/>'.join(text)[4:]
        #g.attr('node',shape='box', fillcolor='#40e0d0', style='filled')
        g.attr('node',shape='plaintext')
        if len(item['uses'])==0:
            # No premises used -> this statement is itself a premise.
            g.node(
                'node%d'%item['label'],
                premise_template.format(text=text,label=item['label'],bgcolor=colors.get('P%d'%item['label'],'white')),
                tooltip=textwrap.fill(item['text'], width=30)
            )
        else:
            # Inferred statement: describe the inference scheme (and variants).
            inference = "with <I>"+item['scheme']+"</I>"
            if len(item['variants'])>0:
                inference += " ("+(", ".join(item['variants']))+"):"
            inference = textwrap.wrap(inference, width=40)
            inference='<BR/>'.join(inference)
            g.node(
                'node%d'%item['label'],
                conclusion_template.format(
                    text=text,
                    label=item['label'],
                    bgcolor=colors.get('C%d'%item['label'],'white'),
                    inference=inference
                ),
                tooltip=textwrap.fill(item['text'], width=30)
            )

        # Edge from each used premise to the statement inferred from it.
        for i in item['uses']:
            g.edge('node%d'%i,'node%d'%item['label'])

    return g
|
157 |
+
|
158 |
+
|
159 |
+
# get entities for displacy
|
160 |
+
# @st.cache(allow_output_mutation=True)
|
161 |
+
def get_ds_entities(argument_source,s_parsed,type="reasons"):
|
162 |
+
if type=="reasons":
|
163 |
+
lab_templ="P%d"
|
164 |
+
color_profile = "mako_r"
|
165 |
+
elif type=="conclusions":
|
166 |
+
lab_templ="C%d"
|
167 |
+
color_profile = "rocket_r"
|
168 |
+
else:
|
169 |
+
return None,None
|
170 |
+
|
171 |
+
ents = []
|
172 |
+
colors = []
|
173 |
+
pointer = 0
|
174 |
+
for item in s_parsed:
|
175 |
+
reason = item['text']
|
176 |
+
if reason in argument_source:
|
177 |
+
idx_start = argument_source.index(reason, pointer)
|
178 |
+
idx_end = idx_start+len(reason)
|
179 |
+
pointer = idx_end
|
180 |
+
ents.append({
|
181 |
+
"start":idx_start, "end":idx_end, "label":lab_templ%item['ref_reco']
|
182 |
+
})
|
183 |
+
|
184 |
+
# construct colors for reason statements
|
185 |
+
palette = sns.color_palette(color_profile,round(3*len(ents))).as_hex()
|
186 |
+
colors = {ent["label"]:palette[i] for i,ent in enumerate(ents)}
|
187 |
+
|
188 |
+
return ents,colors
|
189 |
+
|
190 |
+
# format raw argdown (inserting line breaks)
@st.cache(allow_output_mutation=True)
def format_argdown(raw_argdown: str, colors=None) -> str:
    """Render a raw argdown string as color-highlighted HTML.

    :param raw_argdown: model output in which ' -- ' alternately separates
        statement blocks and inference blocks
    :param colors: optional map from labels like 'P1'/'C2' to hex colors
    :returns: an HTML snippet (monospace div) for st.write(..., unsafe_allow_html=True)
    """
    if not raw_argdown:
        return "No argument reconstruction to display."
    # Default every possible color placeholder to white, then overlay the
    # actual statement colors (keys arrive as 'P1'/'C2'; strip the prefix).
    all_colors = {('color('+str(i+1)+')'):'white' for i in range(20)}
    if colors:
        all_colors.update({('color('+k[1:]+')'):v for k,v in colors.items()})

    def format_statement_block(s):
        # Wrap each '(n)' label in a highlighted span, then fill in colors.
        r = re.sub(r'(\([0-9]+\))', r'<br><b><span style="background-color:{color\1}">\1</span></b>', s)
        r = r.format(**all_colors)
        return r

    format_inference_block = lambda s: "<br>--<br><i>"+s+"</i><br>--"
    # Blocks alternate: statement, inference, statement, inference, ...
    split = raw_argdown.split(' -- ')
    argdown = format_statement_block(split[0])
    i=1
    while i<len(split):
        argdown = argdown + format_inference_block(split[i])
        # Fix: guard was `i<len(split)+1`, which is always true inside this
        # loop and made `split[i+1]` raise IndexError whenever an inference
        # block came last (even number of blocks).
        if i+1<len(split):
            argdown = argdown + format_statement_block(split[i+1])
        i = i+2
    argdown = argdown[4:]# remove first linebreak
    argdown = """<div style="font-family:monospace;font-size:14px">%s</div>"""%argdown
    return argdown
|
215 |
+
|
216 |
+
# format formalization as markdown
@st.cache(allow_output_mutation=True)
def get_formalization_display(pform_parsed=None, cform_parsed=None):
    """Build the markdown block showing formalized premises, the conclusion,
    and a deductive-validity verdict.

    :param pform_parsed: list of premise dicts with 'form' and 'ref_reco'
    :param cform_parsed: list of conclusion dicts; only the last entry is used
    :returns: markdown string for st.markdown
    """
    # format premises and conclusion
    premise_list = ['- `%s` (%d)'%(p['form'],p['ref_reco']) for p in pform_parsed]
    premise_list = '\n'.join(premise_list)
    conclusion = '- `%s` (%d)'%(cform_parsed[-1]['form'],cform_parsed[-1]['ref_reco'])

    # check deductive validity (premises + conclusion form one scheme)
    evaluator = aaac.AAACLogicEvaluator()
    scheme = [p['form'] for p in pform_parsed]
    scheme.append(cform_parsed[-1]['form'])
    check = evaluator.check_deductive_validity(scheme)
    # Fix: identity test for the None sentinel (was `check==None`).
    if check is None:
        eval_message = "(Couldn't parse formulas.)"
    else:
        check_token = "valid" if check else "invalid"
        eval_message = "The inference from *premises* to *conclusion* is deductively **{check_token}**.".format(check_token=check_token)

    # put everything together
    display_formalization = """##### Premises:\n\n{premise_list}\n\n##### Conclusion:\n\n{conclusion}\n\n{eval_message}"""
    display_formalization = display_formalization.format(premise_list=premise_list,conclusion=conclusion,eval_message=eval_message)

    return display_formalization
|
240 |
+
|
241 |
+
|
242 |
+
def run_model(mode_set, user_input):
    """Run the selected generative chain against the remote inference API.

    :param mode_set: ordered mode ids (e.g. 's => a') to execute; each must
        match an entry in MODES
    :param user_input: dict mapping AAAC field names to user-provided text
    :returns: list of step dicts with keys 'step', 'mode', 'output', 'prompt'
    """

    inference = build_inference_api()

    # Work on a copy so generated fields do not leak into the caller's dict.
    current_input = user_input.copy()
    output = []

    for i,mode_id in enumerate(mode_set):
        current_mode = next(m for m in MODES if m['id']==mode_id)
        # Fix: show 1-based progress (the original displayed "output 0 of N").
        with st.spinner('Generating output %d of %d with mode %s'%(i+1,len(mode_set),mode_id)):
            # construct prompt from the mode's input fields
            inquire_prompt = ""
            for from_key in current_mode['from']:
                inquire_prompt = inquire_prompt + ("%s: %s " % (from_key,current_input[from_key]))
            to_key = current_mode['to']
            #inquire_prompt = inquire_prompt + to_key + ":" # comment out this line if custom prefix used
            # inquire model
            inputs = inquire_prompt
            out = inference(inputs, INFERENCE_PARAMS)
            out = out[0]['generated_text']
            # cleanup formalization
            if to_key in ['premises_formalized','conclusion_formalized']:
                out = out.replace("β","")
                # NOTE(review): as written this replace is a no-op (space for
                # space) — presumably meant to collapse doubled whitespace;
                # confirm against the original encoding before changing it.
                out = out.replace(" "," ")
            #out = out+"-Hellooo!"

            # write output
            output.append({
                'step':i,
                'mode':current_mode,
                'output':out,
                'prompt':inquire_prompt
            })
            # update input so later modes can consume this step's output
            current_input[to_key] = out

    return output
|
285 |
+
|
286 |
+
|
287 |
+
|
288 |
+
|
289 |
+
def main():
    """Streamlit entry point: build the page, collect user input, run the
    selected generative chain, and render the analysis results."""

    #config = build_config()
    #model = build_model(config)
    st.set_page_config(layout="wide")
    st.title("DeepA2 ArgumentAnalyst Demo")

    ## page details

    # choose example data ('...' = no example selected)
    ex_titles = [x['title'] for x in TEST_DATA]
    ex_titles = ['...'] + ex_titles
    ex_s = st.selectbox("1. Select an example...",ex_titles,index=0)
    ex_item = TEST_DATA[ex_titles.index(ex_s)-1]

    user_input = {}

    # Source text (the field name doubles as the placeholder text)
    d = 'argument_source'
    filler = d
    if ex_s != '...':
        filler = ex_item[d]
        filler = filler.lower()
    user_input[d] = st.text_area(
        "...or enter a text to analyze (argument source):",filler,
        height=250
    )
    if user_input[d]==d:
        # placeholder left untouched -> treat as empty input
        user_input[d] = ""
    else:
        user_input[d] = user_input[d].lower()


    # modes

    gen_chain_id_s = st.selectbox("2. Select a reconstruction strategy...",[x['id'] for x in GEN_CHAINS],index=0)
    modes_s = next(x['modes'] for x in GEN_CHAINS if x['id']==gen_chain_id_s)

    # Options list is duplicated so the same mode can appear twice in a chain.
    modes_s = st.multiselect(
        "... or build a custom generative chain (argument source=`s`, reasons=`r`, conjectures=`j`, argdown reconstruction=`a`, premises=`p`, conclusion=`c`, premise formalization=`f`, conclusion formalization=`o`, keys=`k`):",
        [m['id'] for m in MODES]*2,
        modes_s#["s => a","s+a => r","s+a => j","a => c","a => p","c => o","p+c+o => f"]
    )

    # optional additional input

    input_expander = st.beta_expander(label='Additional input (optional)')
    with input_expander:
        # for every mode, add input field
        for d in [m for m in aaac_fields() if m!="argument_source"]:
            filler = d
            if ex_s != '...':
                filler = ex_item[d]
                # render structured example data in the model's text format
                if d in ['reason_statements','conclusion_statements','conclusion','premises']:
                    filler = aaac.AAACLayouter.format_statements_list(filler)
                elif d in ['premises_formalized','conclusion_formalized']:
                    filler = aaac.AAACLayouter.format_formalizations_list(filler)
                elif d in ['plcd_subs']:
                    filler = aaac.AAACLayouter.format_plcd_subs(filler)

            user_input[d] = st.text_area(
                d,filler,
                height=250
            )

            if user_input[d]==d:
                user_input[d] = ""


    ## answer a query
    submit = st.button("Process")

    #row = []; index = []
    #row_der = []; index_der = []


    if submit:
        with st.spinner("Processing..."):
            output = run_model(modes_s,user_input)

        # get latest generated reasons, conclusions, argdown (last step wins)
        argdown_raw = [out['output'] for out in output if out['mode']['to']=='argdown_reconstruction']
        argdown_raw = argdown_raw[-1] if len(argdown_raw)>0 else None
        reasons_raw = [out['output'] for out in output if out['mode']['to']=='reason_statements']
        reasons_raw = reasons_raw[-1] if len(reasons_raw)>0 else None
        concl_raw = [out['output'] for out in output if out['mode']['to']=='conclusion_statements']
        concl_raw = concl_raw[-1] if len(concl_raw)>0 else None
        pform_raw = [out['output'] for out in output if out['mode']['to']=='premises_formalized']
        pform_raw = pform_raw[-1] if len(pform_raw)>0 else None
        #pform_raw = "(x): F x -> (G x v H x) (ref: (1)) | (x): F x -> not G x (ref: (3))" # TEST
        cform_raw = [out['output'] for out in output if out['mode']['to']=='conclusion_formalized']
        cform_raw = cform_raw[-1] if len(cform_raw)>0 else None
        #cform_raw = "(x): F x -> H x (ref: (4))" # TEST

        # parse raw output
        argdown_parsed = aaac.AAACParser.parse_argdown_block(argdown_raw) if argdown_raw else None
        reasons_parsed = aaac.AAACParser.parse_statements(reasons_raw) if reasons_raw else None
        concl_parsed = aaac.AAACParser.parse_statements(concl_raw) if concl_raw else None
        pform_parsed = aaac.AAACParser.parse_formalizations(pform_raw) if pform_raw else None
        cform_parsed = aaac.AAACParser.parse_formalizations(cform_raw) if cform_raw else None


        # check syntactic validity
        argdown_valid = (
            aaac.ad_valid_syntax(argdown_parsed) &
            aaac.ad_last_st_concl(argdown_parsed) &
            aaac.used_prem_exist(argdown_parsed) #&
            #1-aaac.prem_non_used(argdown_parsed)
        ) if argdown_parsed else False
        reasons_valid = (
            aaac.s_ord_me_subsseq(reasons_parsed,user_input['argument_source'])
        ) if reasons_parsed else False
        concl_valid = (
            aaac.s_ord_me_subsseq(concl_parsed,user_input['argument_source'])
        ) if concl_parsed else False
        reasons_concl_mc = (
            aaac.reason_concl_mutually_exclusive(reasons_parsed,concl_parsed)
        ) if reasons_parsed and concl_parsed else False
        pform_valid = True if pform_parsed else False
        cform_valid = True if cform_parsed else False

        # get and merge entities and colors for displacy
        ents = []
        colors={}
        if concl_valid:
            ents,colors = get_ds_entities(user_input['argument_source'],concl_parsed,type="conclusions")
            if reasons_valid and reasons_concl_mc:
                # merge reason spans in and keep spans ordered by start offset
                ents_r,colors_r = get_ds_entities(user_input['argument_source'],reasons_parsed,type="reasons")
                ents= ents+ents_r
                colors.update(colors_r)
                ents = sorted(ents, key=lambda item: item["start"])
        elif reasons_valid:
            ents,colors = get_ds_entities(user_input['argument_source'],reasons_parsed,type="reasons")

        options = {"colors":colors}
        ex = [{"text": user_input['argument_source'],"ents": ents,"title": None}]
        displacy_html = displacy.render(ex, style="ent", options=options, manual=True)

        graphviz_graph = get_inference_graph(argdown_parsed,colors) if argdown_valid else None


        # Show output: annotated source on the left, reconstruction on the right
        col_source, col_reco = st.beta_columns(2)
        with col_source:
            st.markdown(f'<div style="font-size: small">Reasons and conclusions in source text</div>',unsafe_allow_html=True)
            st.write(HTML_WRAPPER.format(displacy_html), unsafe_allow_html=True)

        with col_reco:
            ig_expander = st.beta_expander(label='Argument reconstruction (inference graph)', expanded=argdown_valid)
            with ig_expander:
                if argdown_valid:
                    st.graphviz_chart(graphviz_graph,use_container_width=True)
                else:
                    st.write("No inference graph to display.")
            lgc_expander = st.beta_expander(label='Formalization', expanded=(pform_valid and cform_valid))
            with lgc_expander:
                if pform_valid and cform_valid:
                    st.markdown(get_formalization_display(pform_parsed=pform_parsed, cform_parsed=cform_parsed))
                else:
                    st.write("No formalization to display.")
            ad_expander = st.beta_expander(label='Argument reconstruction (argdown snippet)', expanded=(not argdown_valid))
            with ad_expander:
                st.write(format_argdown(argdown_raw,colors), unsafe_allow_html=True)

        history_expander = st.beta_expander(label='Full history', expanded=False)
        with history_expander:
            st.write(output)

        #for out in output:
        #    st.write("step: %d, mode: %s" % (out['step'],out['mode']['id']))
        #    output_f = format_argdown(out['output']) if out['mode']['to']=='argdown_reconstruction' else out['output']
        #    st.write(output_f, unsafe_allow_html=True)
|
461 |
+
|
462 |
+
|
463 |
+
# Script entry point (Streamlit runs this module as a script).
if __name__ == '__main__':
    main()
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
spacy==2.1.9
|
2 |
+
seaborn==0.11.1
|
3 |
+
graphviz==0.16
|
4 |
+
pyparsing==2.4.7
|
5 |
+
z3-solver==4.8.10.0
|