robzchhangte committed: Update app.py
app.py
CHANGED
@@ -5,14 +5,14 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 token = os.getenv("hf_token")
 
 # Load the translation model and tokenizer from Hugging Face
-model_name = "robzchhangte/
+model_name = "robzchhangte/enmz75-helcase"
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=token)
 tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=token)
 
 # Translation function with max_length=512
 def translate(text):
     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
-    outputs = model.generate(inputs["input_ids"], max_length=512)
+    outputs = model.generate(inputs["input_ids"], max_length=512)
     translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return translated_text
 
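For context, here is a minimal self-contained sketch of how the code touched by this commit could run end to end in a Space. Only lines 5-18 of app.py are visible in the diff, so the Gradio wiring at the bottom is an assumption for illustration, not part of the commit; the model name is the one added by this change.

import os

import gradio as gr  # assumed: typical UI layer for a Space; not shown in this diff
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Read the access token from the Space's secrets, as in the diff above.
token = os.getenv("hf_token")

# Model name taken from the line added in this commit.
model_name = "robzchhangte/enmz75-helcase"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=token)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=token)

def translate(text):
    # Tokenize the input, generate up to 512 tokens, and decode the first sequence.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(inputs["input_ids"], max_length=512)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Assumed Gradio interface so the function is reachable from the Space UI.
gr.Interface(fn=translate, inputs="text", outputs="text").launch()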