Spaces: Runtime error

Pavankalyan committed
Commit • 0f191a7 · 1 Parent(s): e78971a

Update app.py
app.py CHANGED

@@ -1,16 +1,16 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
-tokenizer1 = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
-model1 = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")
+#tokenizer1 = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
+#model1 = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")
 tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-mono")
 model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-16B-mono")
 
 def greet(name):
-    hin_snippet = name
-    inputs = tokenizer1.encode(hin_snippet, return_tensors="pt",padding=True,max_length=512,truncation=True)
-    outputs = model1.generate(inputs, max_length=128, num_beams=None, early_stopping=True)
-    translated = tokenizer1.decode(outputs[0]).replace('<pad>',"").strip().lower()
-    input_ids = tokenizer("# "+
+    #hin_snippet = name
+    #inputs = tokenizer1.encode(hin_snippet, return_tensors="pt",padding=True,max_length=512,truncation=True)
+    #outputs = model1.generate(inputs, max_length=128, num_beams=None, early_stopping=True)
+    #translated = tokenizer1.decode(outputs[0]).replace('<pad>',"").strip().lower()
+    input_ids = tokenizer("# "+name, return_tensors="pt").input_ids
     sample1 = model.generate(input_ids, max_length=128)
     return tokenizer.decode(sample1[0], skip_special_tokens=True)
 
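The hunk above covers only lines 1-16 of app.py, so the Gradio wiring that exposes greet() in the Space is not visible in this commit. Below is a minimal sketch of how the updated function would run end to end, assuming the usual text-in/text-out gr.Interface launch code sits below the shown lines; that wiring is an assumption, not part of this diff.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# After this commit the Hindi-to-English step (salesken/translation-hi-en) is
# commented out, so the user's input goes straight to CodeGen.
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-16B-mono")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-16B-mono")

def greet(name):
    # Prefixing the input with "# " frames it as a code comment for CodeGen
    # to complete with source code.
    input_ids = tokenizer("# " + name, return_tensors="pt").input_ids
    sample1 = model.generate(input_ids, max_length=128)
    return tokenizer.decode(sample1[0], skip_special_tokens=True)

# Assumed wiring (not shown in the hunk): a simple text-to-text interface.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()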