Meena committed on
Commit 85eb3dd
1 Parent(s): d9ea2c1

Update app/tapas.py

Files changed (1)
  app/tapas.py +54 -59
app/tapas.py CHANGED
@@ -7,69 +7,64 @@ import re

p = re.compile(r'\d+(\.\d+)?')

- # Define the questions
- queries = [
-     "When did Spider-Man: No Way Home release?",
-     "which Movies have rating 5?"
- ]

def load_model_and_tokenizer():
    """
    Load the pretrained TAPAS tokenizer and model.
    """
    # Load pretrained tokenizer: TAPAS finetuned on WikiTable Questions
    tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")

    # Load pretrained model: TAPAS finetuned on WikiTable Questions
    model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")

    # Return tokenizer and model
    return tokenizer, model


def prepare_inputs(table, queries, tokenizer):
    """
    Cast the table to strings, limit its size, and tokenize it together with the queries.
    """
    # Prepare inputs
    # table = pd.DataFrame.from_dict(data)
    # table = netflix_df[['title', 'release_year', 'rating']].astype('str').head(50)
    table = table.astype('str').head(100)
    inputs = tokenizer(table=table, queries=queries, padding='max_length', return_tensors="pt")

    # Return the prepared table and the tokenized inputs
    return table, inputs


def generate_predictions(inputs, model, tokenizer):
    """
    Generate predictions for some tokenized input.
    """
    # Generate model results
    outputs = model(**inputs)

    # Convert logit outputs into predictions for table cells and aggregation operators
    predicted_table_cell_coords, predicted_aggregation_operators = tokenizer.convert_logits_to_predictions(
        inputs,
        outputs.logits.detach(),
        outputs.logits_aggregation.detach()
    )

    # Return values
    return predicted_table_cell_coords, predicted_aggregation_operators


def postprocess_predictions(predicted_aggregation_operators, predicted_table_cell_coords, table):
    """
    Compute the predicted operation and nicely structure the answers.
    """
    # Process predicted aggregation operators
    aggregation_operators = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}
    aggregation_predictions_string = [aggregation_operators[x] for x in predicted_aggregation_operators]

    # Process predicted table cell coordinates
    answers = []
    for agg, coordinates in zip(predicted_aggregation_operators, predicted_table_cell_coords):
        if len(coordinates) == 1:
            # 1 cell
            answers.append(table.iat[coordinates[0]])
@@ -80,30 +75,30 @@ def postprocess_predictions(predicted_aggregation_operators, predicted_table_cel
            cell_values.append(table.iat[coordinate])
        answers.append(", ".join(cell_values))

    # Return values
    return aggregation_predictions_string, answers


def show_answers(queries, answers, aggregation_predictions_string):
    """
    Visualize the postprocessed answers.
    """
    agg = {"NONE": lambda x: x, "SUM": lambda x: sum(x), "AVERAGE": lambda x: (sum(x) / len(x)), "COUNT": lambda x: len(x)}

    for query, answer, predicted_agg in zip(queries, answers, aggregation_predictions_string):
        print(query)
        if predicted_agg == "NONE":
            print("Predicted answer: " + answer)
        else:
            if all(p.match(val) is not None for val in answer.split(', ')):
                # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](list(map(float, answer.split(','))))))
                return "Predicted answer: " + str(agg[predicted_agg](list(map(float, answer.split(',')))))
            elif predicted_agg == "COUNT":
                # print("Predicted answer: " + predicted_agg + "(" + answer + ") = " + str(agg[predicted_agg](answer.split(','))))
                return "Predicted answer: " + str(agg[predicted_agg](answer.split(',')))
            else:
                return "Predicted answer: " + predicted_agg + " > " + answer
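For context, here is how the helpers in app/tapas.py can be chained together after this commit, now that the module no longer defines its own queries list and the caller supplies the questions at runtime. This is a minimal sketch, assuming the module is importable as app.tapas and that pandas and transformers are installed; the sample table and query strings below are illustrative only and are not part of the repository.

import pandas as pd

from app.tapas import (
    load_model_and_tokenizer,
    prepare_inputs,
    generate_predictions,
    postprocess_predictions,
    show_answers,
)

# Illustrative data: any DataFrame whose cells can be cast to strings will do.
data = {
    "title": ["Spider-Man: No Way Home", "The Irishman"],
    "release_year": [2021, 2019],
    "rating": ["5", "4"],
}
table = pd.DataFrame.from_dict(data)

# Queries are now passed in by the caller instead of being hard-coded in the module.
queries = [
    "When did Spider-Man: No Way Home release?",
    "Which movies have rating 5?",
]

# Load TAPAS, tokenize the table with the queries, run the model, and decode the answers.
tokenizer, model = load_model_and_tokenizer()
table, inputs = prepare_inputs(table, queries, tokenizer)
coords, aggregations = generate_predictions(inputs, model, tokenizer)
aggregation_strings, answers = postprocess_predictions(aggregations, coords, table)

# show_answers prints plain (NONE) answers and returns a formatted string for aggregated ones.
answer_text = show_answers(queries, answers, aggregation_strings)
if answer_text is not None:
    print(answer_text)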