Pavankalyan committed

Commit 28be7b1 (parent 0f191a7)

Update app.py

Files changed (1):
  app.py +12 -11
app.py CHANGED
@@ -1,18 +1,19 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
-#tokenizer1 = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
-#model1 = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")
-tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-mono")
-model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-16B-mono")
 
 def greet(name):
-    #hin_snippet = name
-    #inputs = tokenizer1.encode(hin_snippet, return_tensors="pt", padding=True, max_length=512, truncation=True)
-    #outputs = model1.generate(inputs, max_length=128, num_beams=None, early_stopping=True)
-    #translated = tokenizer1.decode(outputs[0]).replace('<pad>', "").strip().lower()
-    input_ids = tokenizer("# " + name, return_tensors="pt").input_ids
-    sample1 = model.generate(input_ids, max_length=128)
-    return tokenizer.decode(sample1[0], skip_special_tokens=True)
+tokenizer1 = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
+model1 = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")
+#tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-mono")
+#model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-16B-mono")
+    hin_snippet = name
+    inputs = tokenizer1.encode(hin_snippet, return_tensors="pt", padding=True, max_length=512, truncation=True)
+    outputs = model1.generate(inputs, max_length=128, num_beams=None, early_stopping=True)
+    translated = tokenizer1.decode(outputs[0]).replace('<pad>', "").strip().lower()
+    #input_ids = tokenizer("# " + name, return_tensors="pt").input_ids
+    #sample1 = model.generate(input_ids, max_length=128)
+    #return tokenizer.decode(sample1[0], skip_special_tokens=True)
+    return translated
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 iface.launch()
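
This commit flips app.py from CodeGen-based code completion to Hindi-to-English translation with salesken/translation-hi-en. A minimal sketch of the new path outside the Gradio wrapper follows; the Hindi input string is illustrative, num_beams=4 is an assumption swapped in here (early_stopping only takes effect under beam search, so num_beams=None in the committed code leaves it a no-op), and skip_special_tokens=True stands in for the manual '<pad>'-stripping.

# Minimal sketch of the translation path added in this commit (illustrative).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer1 = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
model1 = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")

hin_snippet = "आप कैसे हैं?"  # illustrative Hindi input ("how are you?")
inputs = tokenizer1.encode(hin_snippet, return_tensors="pt",
                           padding=True, max_length=512, truncation=True)
# num_beams=4 is an assumption: early_stopping is only meaningful with beam search.
outputs = model1.generate(inputs, max_length=128, num_beams=4, early_stopping=True)
# skip_special_tokens=True drops <pad>/<eos> instead of string-replacing '<pad>'.
translated = tokenizer1.decode(outputs[0], skip_special_tokens=True).strip().lower()
print(translated)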