File size: 4,536 Bytes
7e6d4d1
 
 
f957ae9
 
c2dbaac
 
 
 
 
7e6d4d1
 
 
70b4cc0
7f79884
 
f957ae9
 
 
 
 
 
 
 
 
 
 
 
 
c2dbaac
 
 
 
 
 
 
 
 
 
 
0754326
c2dbaac
 
 
7e6d4d1
70b4cc0
 
 
 
 
 
 
 
 
4c740cc
7e6d4d1
 
 
 
 
012dca9
 
 
 
 
 
 
 
d2827b6
 
 
 
 
 
 
 
 
 
2156cce
 
 
 
 
 
 
 
 
 
7e6d4d1
c2dbaac
 
 
 
7e6d4d1
 
 
 
 
70b4cc0
 
 
 
d2827b6
 
 
 
 
 
c2dbaac
2156cce
 
 
 
 
 
d2827b6
 
2156cce
012dca9
f957ae9
c2dbaac
 
7e6d4d1
70b4cc0
012dca9
d2827b6
2156cce
d312a64
d2827b6
 
f957ae9
8a39aa7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
# from transformers import AutoModelWithLMHead, AutoTokenizer
# Translate
from transformers import T5ForConditionalGeneration, T5Tokenizer
import gradio as grad

# make a question
# text2text_tkn = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-question-generation-ap')
# mdl = AutoModelWithLMHead.from_pretrained('mrm8488/t5-base-finetuned-question-generation-ap')

# summarize
# text2text_tkn = AutoTokenizer.from_pretrained('deep-learning-analytics/wikihow-t5-small')
# mdl = AutoModelWithLMHead.from_pretrained('deep-learning-analytics/wikihow-t5-small')

# translate, sentiment
# Shared t5-small tokenizer and model reused by every task function below.
# NOTE(review): downloads checkpoint weights from the Hugging Face hub on
# first run — requires network access.
text2text_tkn = T5Tokenizer.from_pretrained('t5-small')
mdl = T5ForConditionalGeneration.from_pretrained('t5-small')

def text2text(context, answer):
    """Generate a question from *context* whose answer is *answer*.

    Written for the 'mrm8488/t5-base-finetuned-question-generation-ap'
    checkpoint (see the commented imports at the top of the file); the
    prompt follows that model's "answer: ... context: ..." format.
    Returns the decoded question as a string.
    """
    input_text = "answer: %s context: %s </s>" % (answer, context)
    features = text2text_tkn([input_text], return_tensors = 'pt')
    output = mdl.generate(
        input_ids = features['input_ids'],
        attention_mask = features['attention_mask'],
        max_length = 64
    )
    # Fix: skip_special_tokens keeps <pad>/</s> markers out of the
    # displayed text, matching text2text_summary below.
    response = text2text_tkn.decode(output[0], skip_special_tokens = True)

    return response

def text2text_summary(para):
    """Summarize *para* with beam search and return the decoded text."""
    # Collapse newlines so the model sees a single continuous paragraph.
    cleaned = para.strip().replace("\n", "")
    input_ids = text2text_tkn.encode(cleaned, return_tensors = 'pt')
    summary_ids = mdl.generate(
        input_ids,
        max_length = 250,
        num_beams = 5,
        repetition_penalty = 2.5,
        early_stopping = True
    )
    return text2text_tkn.decode(summary_ids[0], skip_special_tokens = True)

def text2text_translate(text):
    """Translate English *text* to German via the t5-small task prefix.

    Returns the decoded generation as a one-element list (batch_decode).
    """
    # Fix: the canonical T5 prefix is "translate English to German: "
    # (single colon); the doubled colon degraded translation quality.
    inp = "translate English to German: " + text
    enc = text2text_tkn(inp, return_tensors = 'pt')
    tokens = mdl.generate(**enc)
    # Drop <pad>/</s> markers from the displayed output.
    response = text2text_tkn.batch_decode(tokens, skip_special_tokens = True)

    return response

def text2text_sentiment(text):
    """Classify the sentiment of *text* using the T5 SST-2 task prefix."""
    prompt = "sst2 sentence: " + text
    encoded = text2text_tkn(prompt, return_tensors = 'pt')
    generated = mdl.generate(**encoded)
    return text2text_tkn.batch_decode(generated)

def text2text_acceptable_sentence(text):
    """Judge grammatical acceptability of *text* (T5 CoLA task prefix)."""
    prompt = "cola sentence: " + text
    encoded = text2text_tkn(prompt, return_tensors = 'pt')
    generated = mdl.generate(**encoded)
    return text2text_tkn.batch_decode(generated)

def text2text_paraphrase(sentence1, sentence2):
    """Check whether the two sentences are paraphrases (T5 MRPC task)."""
    prompt = ("mrpc sentence1: " + sentence1
              + " " + "sentence2: " + sentence2)
    encoded = text2text_tkn(prompt, return_tensors = 'pt')
    generated = mdl.generate(**encoded)
    return text2text_tkn.batch_decode(generated)

def text2text_deductible(sentence1, sentence2):
    """Check whether *sentence2* follows from *sentence1* (T5 RTE task).

    Returns the decoded generation as a one-element list (batch_decode).
    """
    inp1 = "rte sentence1: " + sentence1
    inp2 = "sentence2: " + sentence2
    # Fix: the prompt was assigned to a misspelled name ('combined_inpu')
    # and then read as 'combined_inp', raising NameError on every call.
    combined_inp = inp1 + " " + inp2
    enc = text2text_tkn(combined_inp, return_tensors = 'pt')
    tokens = mdl.generate(**enc)
    response = text2text_tkn.batch_decode(tokens)

    return response

# Gradio widget definitions. Exactly one task's inputs/outputs is active at
# a time; the alternatives are kept commented for quick switching.

# question
# context = grad.Textbox(lines = 10, label = 'English', placeholder = 'Context')
# ans = grad.Textbox(lines = 1, label = 'Answer')
# out = grad.Textbox(lines = 1, label = 'Generated Question')

# summary
# para = grad.Textbox(lines = 10, label = 'Paragraph', placeholder = 'Copy paragraph')
# out = grad.Textbox(lines = 1, label = 'Summary')

# translate
# para = grad.Textbox(lines = 1, label = 'English Text', placeholder = 'Text in English')
# out = grad.Textbox(lines = 1, label = 'German Translation')

# sentiment
# para = grad.Textbox(lines = 1, label = 'English Text', placeholder = 'Text in English')
# out = grad.Textbox(lines = 1, label = 'Sentiment')

# # grammatical acceptance
# para = grad.Textbox(lines = 1, label = 'English Text', placeholder = 'Text in English')
# out = grad.Textbox(lines = 1, label = 'Whether the sentence is acceptable or not')

# # paraphrase
# sent1 = grad.Textbox(lines = 1, label = 'Sentence1', placeholder = 'Text in English')
# sent2 = grad.Textbox(lines = 1, label = 'Sentence2', placeholder = 'Text in English')
# out = grad.Textbox(lines = 1, label = 'Paraphrase')

# deduction (currently active: feeds text2text_deductible below)
sent1 = grad.Textbox(lines = 1, label = 'Sentence1', placeholder = 'Text in English')
sent2 = grad.Textbox(lines = 1, label = 'Sentence2', placeholder = 'Text in English')
out = grad.Textbox(lines = 1, label = 'Deduction')

# Build and launch the Gradio UI wired to the deduction (RTE) task.
# To expose a different task, swap which function/inputs lines are
# uncommented here and which widget definitions are active above.
grad.Interface(
   # text2text,
    # inputs = [context, ans],
    # text2text_summary,
    # text2text_translate,
    # text2text_sentiment,
    # text2text_acceptable_sentence,
    # text2text_paraphrase,
    text2text_deductible,
    # inputs = para,
    inputs = [sent1, sent2],
    outputs = out
).launch()