hf-dongpyo committed on
Commit 70b4cc0
1 Parent(s): 7f79884

Update app.py

Files changed (1)
  1. app.py +17 -4
app.py CHANGED
@@ -11,7 +11,7 @@ import gradio as grad
 # text2text_tkn = AutoTokenizer.from_pretrained('deep-learning-analytics/wikihow-t5-small')
 # mdl = AutoModelWithLMHead.from_pretrained('deep-learning-analytics/wikihow-t5-small')
 
-# translate
+# translate, sentiment
 text2text_tkn = T5Tokenizer.from_pretrained('t5-small')
 mdl = T5ForConditionalGeneration.from_pretrained('t5-small')
 
@@ -43,7 +43,15 @@ def text2text_summary(para):
     return response
 
 def text2text_translate(text):
-    inp = "translate English to Korean:: " + text
+    inp = "translate English to German:: " + text
+    enc = text2text_tkn(inp, return_tensors = 'pt')
+    tokens = mdl.generate(**enc)
+    response = text2text_tkn.batch_decode(tokens)
+
+    return response
+
+def text2text_sentiment(text):
+    inp = "sst2 sentence: " + text
     enc = text2text_tkn(inp, return_tensors = 'pt')
     tokens = mdl.generate(**enc)
     response = text2text_tkn.batch_decode(tokens)
@@ -60,14 +68,19 @@ def text2text_translate(text):
 # out = grad.Textbox(lines = 1, label = 'Summary')
 
 # tranlate
+# para = grad.Textbox(lines = 1, label = 'English Text', placeholder = 'Text in English')
+# out = grad.Textbox(lines = 1, label = 'German Translation')
+
+# sentiment
 para = grad.Textbox(lines = 1, label = 'English Text', placeholder = 'Text in English')
-out = grad.Textbox(lines = 1, label = 'Korean Translation')
+out = grad.Textbox(lines = 1, label = 'Sentiment')
 
 grad.Interface(
     # text2text,
     # inputs = [context, ans],
     # text2text_summary,
-    text2text_translate,
+    # text2text_translate,
+    text2text_sentiment,
     inputs = para,
     outputs = out
 ).launch()
 
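For reference, a minimal standalone sketch of the sentiment path the commit switches the Interface to. The model name, prompt prefix, and Gradio wiring come from the diff above; the skip_special_tokens cleanup and returning a single stripped string are assumptions, not part of the commit:

# Minimal sketch of the post-commit sentiment path (cleanup details are assumptions).
from transformers import T5Tokenizer, T5ForConditionalGeneration
import gradio as grad

text2text_tkn = T5Tokenizer.from_pretrained('t5-small')
mdl = T5ForConditionalGeneration.from_pretrained('t5-small')

def text2text_sentiment(text):
    # t5-small was trained with the "sst2 sentence: " prefix for SST-2 sentiment classification
    inp = "sst2 sentence: " + text
    enc = text2text_tkn(inp, return_tensors='pt')
    tokens = mdl.generate(**enc)
    # skip_special_tokens=True (an assumption, not in the commit) strips the <pad>/</s>
    # markers that batch_decode otherwise leaves in the response
    return text2text_tkn.batch_decode(tokens, skip_special_tokens=True)[0].strip()

para = grad.Textbox(lines=1, label='English Text', placeholder='Text in English')
out = grad.Textbox(lines=1, label='Sentiment')

grad.Interface(text2text_sentiment, inputs=para, outputs=out).launch()

With the sst2 prefix, t5-small typically generates the literal label "positive" or "negative", which is what the Sentiment textbox will display.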
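Side note on the translate path this commit comments out: t5-small only covers the English-to-German/French/Romanian prefixes from T5's pretraining mixture, which is presumably why the Korean label was replaced with German. The canonical prefix also uses a single colon rather than the double colon in the diff. A hedged sketch of that variant, reusing the tokenizer and model loaded above:

# Sketch of the commented-out translation path with the canonical single-colon prefix.
def text2text_translate(text):
    inp = "translate English to German: " + text
    enc = text2text_tkn(inp, return_tensors='pt')
    tokens = mdl.generate(**enc)
    return text2text_tkn.batch_decode(tokens, skip_special_tokens=True)[0].strip()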