Update app.py
app.py CHANGED
@@ -1,11 +1,20 @@
+import os
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
+# Enable Zero GPU
+os.environ["TOKENIZERS_PARALLELISM"] = "true"
+
 model_name = "Mhassanen/nllb-200-600M-En-Ar"
 tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="eng_Latn", tgt_lang="arz_Arab")
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
 def translate2(text):
+    import torch
+    # Ensure Zero GPU is enabled
+    torch.set_grad_enabled(False)
+    torch.set_num_threads(1)
+
     inputs = tokenizer(text, return_tensors="pt", padding=True)
     translated_tokens = model.generate(**inputs)
     translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
@@ -23,8 +32,6 @@ with st.sidebar:
     st.markdown('''
    - This App powered by [Mhassanen/nllb-200-600M-En-Ar](https://huggingface.co/Mhassanen/nllb-200-600M-En-Ar) Language model
     ''')
-    # st.markdown("---")
-
 
 st.title("Try Now!")
 
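The diff only touches the model setup and the translate2 helper; the Streamlit widgets that call it sit outside the changed hunks and are not shown. As a rough illustration of how the updated inference path can be exercised on its own, the sketch below loads the same checkpoint outside Streamlit. The explicit forced_bos_token_id argument, the translate_sketch name, and the example input are assumptions added for illustration, not part of the commit.

# Minimal sketch (not part of the commit): running the same checkpoint
# outside Streamlit. forced_bos_token_id is an assumed addition here; it is
# a common way to pin NLLB's output language ("arz_Arab") during decoding.
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "Mhassanen/nllb-200-600M-En-Ar"
tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="eng_Latn", tgt_lang="arz_Arab")
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def translate_sketch(text: str) -> str:
    # Inference only, mirroring torch.set_grad_enabled(False) in the diff.
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt", padding=True)
        translated_tokens = model.generate(
            **inputs,
            # Assumed target-language pin; the language code is a token in NLLB's vocab.
            forced_bos_token_id=tokenizer.convert_tokens_to_ids("arz_Arab"),
            max_length=256,
        )
    return tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]

if __name__ == "__main__":
    print(translate_sketch("How are you today?"))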