Meena committed on
Commit
f900238
1 Parent(s): 5dd67d8

Update app/tapas.py

Browse files
Files changed (1) hide show
  1. app/tapas.py +6 -15
app/tapas.py CHANGED
@@ -9,12 +9,7 @@ def load_model_and_tokenizer():
9
  """
10
  Load
11
  """
12
- # Load pretrained tokenizer: TAPAS finetuned on WikiTable Questions
13
- # tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
14
  tokenizer = AutoTokenizer.from_pretrained("Meena/table-question-answering-tapas")
15
-
16
- # Load pretrained model: TAPAS finetuned on WikiTable Questions
17
- # model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
18
  model = AutoModelForTableQuestionAnswering.from_pretrained("Meena/table-question-answering-tapas")
19
 
20
  # Return tokenizer and model
@@ -25,13 +20,8 @@ def prepare_inputs(table, queries, tokenizer):
25
  """
26
  Convert dictionary into data frame and tokenize inputs given queries.
27
  """
28
- # Prepare inputs
29
- # table = pd.DataFrame.from_dict(data)
30
- # table = netflix_df[['title', 'release_year', 'rating']].astype('str').head(50)
31
  table = table.astype('str').head(100)
32
  inputs = tokenizer(table=table, queries=queries, padding='max_length', return_tensors="pt")
33
-
34
- # Return things
35
  return table, inputs
36
 
37
 
@@ -83,7 +73,7 @@ def show_answers(queries, answers, aggregation_predictions_string):
83
  Visualize the postprocessed answers.
84
  """
85
  agg = {"NONE": lambda x: x, "SUM" : lambda x: sum(x), "AVERAGE": lambda x: (sum(x) / len(x)), "COUNT": lambda x: len(x)}
86
- result = ''
87
  for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):
88
  print(query)
89
  if predicted_agg == "NONE":
@@ -91,13 +81,14 @@ def show_answers(queries, answers, aggregation_predictions_string):
91
  else:
92
  if all([not p.match(val) == None for val in answer.split(', ')]):
93
  # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](list(map(float, answer.split(','))))))
94
- result = "Predicted answer: " + str(agg[predicted_agg](list(map(float, answer.split(',')))))
95
  elif predicted_agg == "COUNT":
96
  # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](answer.split(','))))
97
- result = "Predicted answer: " + str(agg[predicted_agg](answer.split(',')))
98
  else:
99
- result = "Predicted answer: " + predicted_agg + " > " + answer
100
- return result
 
101
 
102
  def execute_query(queries, table):
103
 
 
9
  """
10
  Load
11
  """
 
 
12
  tokenizer = AutoTokenizer.from_pretrained("Meena/table-question-answering-tapas")
 
 
 
13
  model = AutoModelForTableQuestionAnswering.from_pretrained("Meena/table-question-answering-tapas")
14
 
15
  # Return tokenizer and model
 
20
  """
21
  Convert dictionary into data frame and tokenize inputs given queries.
22
  """
 
 
 
23
  table = table.astype('str').head(100)
24
  inputs = tokenizer(table=table, queries=queries, padding='max_length', return_tensors="pt")
 
 
25
  return table, inputs
26
 
27
 
 
73
  Visualize the postprocessed answers.
74
  """
75
  agg = {"NONE": lambda x: x, "SUM" : lambda x: sum(x), "AVERAGE": lambda x: (sum(x) / len(x)), "COUNT": lambda x: len(x)}
76
+ results = []
77
  for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):
78
  print(query)
79
  if predicted_agg == "NONE":
 
81
  else:
82
  if all([not p.match(val) == None for val in answer.split(', ')]):
83
  # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](list(map(float, answer.split(','))))))
84
+ result = str(agg[predicted_agg](list(map(float, answer.split(',')))))
85
  elif predicted_agg == "COUNT":
86
  # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](answer.split(','))))
87
+ result = str(agg[predicted_agg](answer.split(',')))
88
  else:
89
+ result = predicted_agg + " > " + answer
90
+ results.append(result)
91
+ return results
92
 
93
  def execute_query(queries, table):
94