Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,63 +1,65 @@
|
|
|
|
|
|
1 |
import streamlit as st
|
2 |
import pandas as pd
|
3 |
import pysrt
|
4 |
from transformers import MarianMTModel, MarianTokenizer
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
def translate_text(text, source_language_code, target_language_code):
|
16 |
-
# Construct model name using ISO 639-1 codes
|
17 |
model_name = f"Helsinki-NLP/opus-mt-{source_language_code}-{target_language_code}"
|
18 |
-
|
19 |
-
# Check if source and target languages are the same, which is not supported for translation
|
20 |
if source_language_code == target_language_code:
|
21 |
return "Translation between the same languages is not supported."
|
22 |
-
|
23 |
-
# Load tokenizer and model
|
24 |
try:
|
25 |
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
26 |
model = MarianMTModel.from_pretrained(model_name)
|
27 |
except Exception as e:
|
28 |
return f"Failed to load model for {source_language_code} to {target_language_code}: {str(e)}"
|
29 |
-
|
30 |
-
# Translate text
|
31 |
-
translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512))
|
32 |
-
translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
|
33 |
|
34 |
-
|
|
|
|
|
|
|
|
|
|
|
35 |
|
36 |
def translate_srt(input_file, source_language_code, target_language_code):
|
37 |
-
# Load SRT file
|
38 |
subs = pysrt.open(input_file.name)
|
39 |
-
|
40 |
-
# Initialize an empty list to store translated subtitles
|
41 |
translated_subs = []
|
42 |
-
|
43 |
-
# Translate each subtitle
|
44 |
for idx, sub in enumerate(subs):
|
45 |
translated_text = translate_text(sub.text, source_language_code, target_language_code)
|
46 |
-
# Construct the translated subtitle with timestamp and line number
|
47 |
translated_sub = pysrt.SubRipItem(index=idx+1, start=sub.start, end=sub.end, text=translated_text)
|
48 |
translated_subs.append(translated_sub)
|
49 |
-
|
50 |
-
# Save translated subtitles to a new SRT file
|
51 |
translated_file = pysrt.SubRipFile(translated_subs)
|
52 |
translated_srt_path = input_file.name.replace(".srt", f"_{target_language_code}.srt")
|
53 |
translated_file.save(translated_srt_path)
|
|
|
54 |
return translated_srt_path
|
55 |
|
56 |
st.title("SRT Translator")
|
57 |
st.write("Translate subtitles from one language to another.")
|
58 |
|
59 |
-
|
60 |
-
|
|
|
|
|
|
|
|
|
61 |
|
62 |
file_input = st.file_uploader("Upload SRT File", type=["srt"])
|
63 |
|
|
|
1 |
+
import io
|
2 |
+
import requests
|
3 |
import streamlit as st
|
4 |
import pandas as pd
|
5 |
import pysrt
|
6 |
from transformers import MarianMTModel, MarianTokenizer
|
7 |
|
8 |
+
def fetch_languages(url):
    """Download the ISO language table and build selectbox options.

    Parameters
    ----------
    url : str
        Location of a pipe-delimited markdown table whose first two rows
        are header/separator lines (skipped below).

    Returns
    -------
    list[tuple[str, str]]
        ``(iso_639_1_code, "code - Language Name")`` pairs, or an empty
        list (with a Streamlit error shown) when the download fails.
    """
    try:
        # Bound the request so a hung server cannot freeze the app forever.
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        st.error("Failed to fetch language options. Please try again later.")
        return []
    if response.status_code == 200:
        # Convert bytes to a string using decode, then create a file-like object with io.StringIO
        csv_content = response.content.decode('utf-8')
        df = pd.read_csv(io.StringIO(csv_content), delimiter="|", skiprows=2, header=None).dropna(axis=1, how='all')
        df.columns = ['ISO 639-1', 'ISO 639-2', 'Language Name', 'Native Name']
        # Markdown table cells are padded with spaces; strip the codes.
        df['ISO 639-1'] = df['ISO 639-1'].str.strip()
        language_options = [(row['ISO 639-1'], f"{row['ISO 639-1']} - {row['Language Name']}")
                            for _, row in df.iterrows()]
        return language_options
    else:
        st.error("Failed to fetch language options. Please try again later.")
        return []
|
21 |
|
22 |
# Cache of loaded (tokenizer, model) pairs keyed by model name, so we do not
# re-download / re-deserialize the weights for every subtitle line.
_model_cache = {}


def translate_text(text, source_language_code, target_language_code):
    """Translate *text* line by line with a Helsinki-NLP MarianMT model.

    Parameters
    ----------
    text : str
        Possibly multi-line subtitle text; newlines are preserved.
    source_language_code, target_language_code : str
        ISO 639-1 codes used to pick the ``opus-mt-<src>-<tgt>`` model.

    Returns
    -------
    str
        The translated text, or a human-readable error message when the
        language pair is unsupported or the model cannot be loaded.
    """
    model_name = f"Helsinki-NLP/opus-mt-{source_language_code}-{target_language_code}"

    # Identical source/target pairs have no opus-mt model.
    if source_language_code == target_language_code:
        return "Translation between the same languages is not supported."

    # Loading a MarianMT model is expensive; this function is called once per
    # subtitle, so cache the tokenizer/model pair across calls.
    if model_name not in _model_cache:
        try:
            _model_cache[model_name] = (
                MarianTokenizer.from_pretrained(model_name),
                MarianMTModel.from_pretrained(model_name),
            )
        except Exception as e:
            return f"Failed to load model for {source_language_code} to {target_language_code}: {str(e)}"
    tokenizer, model = _model_cache[model_name]

    translated_texts = []
    for sentence in text.split("\n"):
        # Keep blank lines as-is instead of asking the model to "translate" them.
        if not sentence.strip():
            translated_texts.append(sentence)
            continue
        translated = model.generate(**tokenizer(sentence, return_tensors="pt", padding=True, truncation=True, max_length=512))
        translated_texts.append(tokenizer.decode(translated[0], skip_special_tokens=True))
    return "\n".join(translated_texts)
|
38 |
|
39 |
def translate_srt(input_file, source_language_code, target_language_code):
    """Translate every subtitle of an uploaded SRT file.

    Parameters
    ----------
    input_file : streamlit UploadedFile
        The uploaded ``.srt`` file; read from memory (the client-side
        filename is not guaranteed to exist on the server's disk).
    source_language_code, target_language_code : str
        ISO 639-1 codes forwarded to :func:`translate_text`.

    Returns
    -------
    str
        Path of the translated ``.srt`` file written to the working directory.
    """
    # A Streamlit upload lives in memory; parse its bytes directly instead of
    # pysrt.open(input_file.name), which assumes a file on disk that usually
    # is not there. utf-8-sig also tolerates a BOM, common in SRT files.
    subs = pysrt.from_string(input_file.getvalue().decode("utf-8-sig"))

    translated_subs = []
    progress_bar = st.progress(0)
    total = len(subs) or 1  # avoid ZeroDivisionError on an empty/invalid SRT
    for idx, sub in enumerate(subs):
        translated_text = translate_text(sub.text, source_language_code, target_language_code)
        # Re-number from 1 while keeping the original timestamps.
        translated_sub = pysrt.SubRipItem(index=idx + 1, start=sub.start, end=sub.end, text=translated_text)
        translated_subs.append(translated_sub)
        progress_bar.progress((idx + 1) / total)

    # Save translated subtitles to a new SRT file named after the upload.
    translated_file = pysrt.SubRipFile(translated_subs)
    translated_srt_path = input_file.name.replace(".srt", f"_{target_language_code}.srt")
    translated_file.save(translated_srt_path)
    progress_bar.empty()
    return translated_srt_path
|
53 |
|
54 |
st.title("SRT Translator")
st.write("Translate subtitles from one language to another.")

# Fetch language options: a list of (iso_code, "code - Language Name") tuples.
url = "https://huggingface.co/Lenylvt/LanguageISO/resolve/main/iso.md"
language_options = fetch_languages(url)

# st.selectbox returns the selected *option* itself; with tuple options the
# raw return value would be the whole (code, label) tuple, which would then
# be interpolated into the model name. Show the label via format_func and
# unpack just the ISO code for downstream use.
source_selection = st.selectbox("Select Source Language", options=language_options,
                                format_func=lambda option: option[1])
target_selection = st.selectbox("Select Target Language", options=language_options,
                                format_func=lambda option: option[1])
# selectbox yields None when language_options is empty (fetch failure).
source_language_code = source_selection[0] if source_selection else None
target_language_code = target_selection[0] if target_selection else None

file_input = st.file_uploader("Upload SRT File", type=["srt"])
|
65 |
|