ggbetz committed
Commit a724493
1 Parent(s): e63a601
Files changed (1)
  1. app.py +26 -20
app.py CHANGED
@@ -4,6 +4,7 @@ import json
 import textwrap
 import re
 
+#from transformers import pipeline
 import streamlit as st
 from spacy import displacy
 import graphviz
@@ -13,6 +14,7 @@ import seaborn as sns
 import aaac_util as aaac
 
 
+MODEL = "debatelab/argument-analyst"
 
 INTRO_TEXT = """This app let's you explore ArgumentAnalyst, a system for
 logical analysis and reconstruction of argumentative texts (DeepA2).
@@ -268,7 +270,9 @@ def run_model(mode_set, user_input):
     :returns: output dict
     """
 
+
     inference = build_inference_api()
+    #t2t_pipeline = pipeline("text2text-generation", model=MODEL)
 
     current_input = user_input.copy()
     output = []
@@ -283,26 +287,28 @@ def run_model(mode_set, user_input):
             inquire_prompt = inquire_prompt + (f"{to_key}: {from_key}: {current_input[from_key]}")
         # inquire model
         inputs = inquire_prompt
-        attempts = 0
-        out = None
-        while not out and attempts<MAX_API_CALLS:
-            attempts += 1
-            try:
-                # api call
-                out = inference(inputs, params)
-                if not isinstance(out, list):
-                    raise ValueError('Response is not a list.')
-            except Exception:
-                if attempts < MAX_API_CALLS:
-                    st.warning(
-                        f"HF Inference API call (attempt {attempts} of {MAX_API_CALLS}) has failed. Response: {out}. Trying again..."
-                    )
-                    out = None
-                else:
-                    st.warning(
-                        f"HF Inference API call (attempt {attempts} of {MAX_API_CALLS}) has failed. Response: {out}. Stopping."
-                    )
-                    return None
+        if True:
+            attempts = 0
+            out = None
+            while not out and attempts<MAX_API_CALLS:
+                attempts += 1
+                try:
+                    # api call
+                    out = inference(inputs, INFERENCE_PARAMS)
+                    if not isinstance(out, list):
+                        raise ValueError('Response is not a list.')
+                except Exception:
+                    if attempts < MAX_API_CALLS:
+                        st.warning(
+                            f"HF Inference API call (attempt {attempts} of {MAX_API_CALLS}) has failed. Response: {out}. Trying again..."
+                        )
+                        out = None
+                    else:
+                        st.warning(
+                            f"HF Inference API call (attempt {attempts} of {MAX_API_CALLS}) has failed. Response: {out}. Stopping."
+                        )
+                        return None
+        #out = st.session_state.pipeline(inputs,**INFERENCE_PARAMS)
         out = out[0]['generated_text']
         # cleanup formalization
         if to_key in ['premises_formalized','conclusion_formalized']:
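
The commented-out lines in this commit point at an alternative to the HF Inference API: loading MODEL locally as a transformers text2text-generation pipeline and reusing it via st.session_state. A minimal sketch of that path, assuming INFERENCE_PARAMS unpacks into the pipeline's generation keyword arguments; the session-state key and the run_locally helper are illustrative and not part of the commit:

from transformers import pipeline

import streamlit as st

MODEL = "debatelab/argument-analyst"


def run_locally(inputs: str, params: dict) -> list:
    """Generate with a locally loaded text2text pipeline instead of the Inference API."""
    # Load the pipeline once and keep it in session state for reuse across reruns.
    if "t2t_pipeline" not in st.session_state:
        st.session_state["t2t_pipeline"] = pipeline("text2text-generation", model=MODEL)
    # The pipeline returns a list of dicts with a 'generated_text' key, the same
    # shape the retry loop expects from the Inference API response.
    return st.session_state["t2t_pipeline"](inputs, **params)

Keeping the return shape identical to the API response means the downstream parsing (out[0]['generated_text']) would not need to change if the local path were switched on.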