hf-dongpyo commited on
Commit
7e6d4d1
1 Parent(s): 0754326

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -6
app.py CHANGED
@@ -1,4 +1,6 @@
1
- from transformers import AutoModelWithLMHead, AutoTokenizer
 
 
2
  import gradio as grad
3
 
4
  # make a question
@@ -6,8 +8,12 @@ import gradio as grad
6
  # mdl = AutoModelWithLMHead.from_pretrained('mrm8488/t5-base-finetuned-question-generation-ap')
7
 
8
  # summarize
9
- text2text_tkn = AutoTokenizer.from_pretrained('deep-learning-analytics/wikihow-t5-small')
10
- mdl = AutoModelWithLMHead.from_pretrained('deep-learning-analytics/wikihow-t5-small')
 
 
 
 
11
 
12
  def text2text(context, answer):
13
  input_text = "answer: %s context: %s </s>" % (answer, context)
@@ -36,17 +42,32 @@ def text2text_summary(para):
36
 
37
  return response
38
 
 
 
 
 
 
 
 
 
 
39
  # context = grad.Textbox(lines = 10, label = 'English', placeholder = 'Context')
40
  # ans = grad.Textbox(lines = 1, label = 'Answer')
41
  # out = grad.Textbox(lines = 1, label = 'Generated Question')
42
 
43
- para = grad.Textbox(lines = 10, label = 'Paragraph', placeholder = 'Copy paragraph')
44
- out = grad.Textbox(lines = 1, label = 'Summary')
 
 
 
 
 
45
 
46
  grad.Interface(
47
  # text2text,
48
  # inputs = [context, ans],
49
- text2text_summary,
 
50
  inputs = para,
51
  outputs = out
52
  ).launch()
 
1
+ # from transformers import AutoModelWithLMHead, AutoTokenizer
2
+ # Translate
3
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
4
  import gradio as grad
5
 
6
  # make a question
 
8
  # mdl = AutoModelWithLMHead.from_pretrained('mrm8488/t5-base-finetuned-question-generation-ap')
9
 
10
  # summarize
11
+ # text2text_tkn = AutoTokenizer.from_pretrained('deep-learning-analytics/wikihow-t5-small')
12
+ # mdl = AutoModelWithLMHead.from_pretrained('deep-learning-analytics/wikihow-t5-small')
13
+
14
+ # translate
15
+ text2text_tkn = T5Tokenizer.from_pretrained('KETI-AIR/ke-t5-small')
16
+ mdl = T5ForConditionalGeneration.from_pretrained('KETI-AIR/ke-t5-small')
17
 
18
  def text2text(context, answer):
19
  input_text = "answer: %s context: %s </s>" % (answer, context)
 
42
 
43
  return response
44
 
45
+ def text2text_translate(text):
46
+ inp = "translate English to Korean:: " + text
47
+ enc = text2text_tkn(inpu, return_tensors = 'pt')
48
+ tokens = mdl.generate(**enc)
49
+ response = text2text_tkn.batch_decode(tokens)
50
+
51
+ return response
52
+
53
# Question-generation UI (disabled):
# context = grad.Textbox(lines=10, label='English', placeholder='Context')
# ans = grad.Textbox(lines=1, label='Answer')
# out = grad.Textbox(lines=1, label='Generated Question')

# Summarization UI (disabled):
# para = grad.Textbox(lines=10, label='Paragraph', placeholder='Copy paragraph')
# out = grad.Textbox(lines=1, label='Summary')

# Translation UI: one-line English input box, one-line Korean output box.
para = grad.Textbox(lines=1, label='English Text', placeholder='Text in English')
out = grad.Textbox(lines=1, label='Korean Translation')

# Wire the translate function to the widgets and start the Gradio app.
grad.Interface(
    # text2text,
    # inputs = [context, ans],
    # text2text_summary,
    text2text_translate,
    inputs=para,
    outputs=out,
).launch()