new
app.py CHANGED
@@ -1,7 +1,6 @@
 import gradio as gr
 from transformers import AutoConfig, AutoTokenizer
 from bert_graph import BertForMultipleChoice
-import ipdb
 import torch
 import copy
 from itertools import chain
@@ -13,14 +12,12 @@ def preprocess_function_exp(examples, tokenizer):
 
     # Flatten out
     pair_list = examples
-    # ipdb.set_trace()
     pair_len = [len(item) for item in pair_list]
 
     first_sentences = []
     second_sentences = []
     for line_list in pair_list:
         for line in line_list:
-            # ipdb.set_trace()
             sent_item = line.strip().split('\t')
             first_sentences.append(sent_item[0].strip())
             second_sentences.append(sent_item[1].strip())
@@ -100,8 +97,6 @@ def max_vote(logits1, logits2, pred1, pred2):
     # torch.topk(soft_logits1, n=2)
     values_1, _ = soft_logits1.topk(k=2)
     values_2, _ = soft_logits2.topk(k=2)
-    # import ipdb
-    # ipdb.set_trace()
     # if (values_1[0] - values_2[0]) > (values_1[1] - values_2[1]):
     #     pred_res.append(int(pred1[i].detach().cpu().numpy()))
     # else:
@@ -136,7 +131,6 @@ def model_infer(input_a, input_b):
     examples = [[input_a+'\t'+input_a, input_a+'\t'+input_b, input_b+'\t'+input_a, input_b+'\t'+input_b]]
     tokenized_inputs = preprocess_function_exp(examples, tokenizer)
     tokenized_inputs = DCForMultipleChoice(tokenized_inputs, tokenizer)
-    # ipdb.set_trace()
     outputs = model(**tokenized_inputs)
     predictions, scores = max_vote(outputs.logits[0], outputs.logits[1], outputs.logits[0].argmax(dim=-1), outputs.logits[1].argmax(dim=-1))
 
@@ -148,7 +142,6 @@ def model_infer(input_a, input_b):
     label_b_a = label_space[prediction_b_a]
 
     return 'Head Argument {} Tail Argument'.format(label_a_b, label_b_a)
-    # ipdb.set_trace()
 
 
 with gr.Blocks() as demo: