from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from tqdm import tqdm
import torch

# Load the fine-tuned mBART checkpoint and its tokenizer from local disk,
# then move the model to the GPU for inference.
# NOTE(review): hard-coded absolute path (with a trailing '-') — confirm this
# is the intended checkpoint directory; adjust for other machines.
model_path = '/media/ubuntu-2/fosu2_2/LLM/mbart_nl_zh/checkpoint-56250-'
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to('cuda')
# Declare the source language as Dutch using mBART's language-code convention,
# so the tokenizer prepends the correct language token during encoding.
tokenizer.src_lang = "nl_XX"

# Translate a batch of source sentences in a single forward pass.
def translate_texts_batched(texts):
    """Translate a list of source-language strings (Dutch, per
    ``tokenizer.src_lang``) into Chinese in one batched generate() call.

    Args:
        texts: list[str] of source sentences.

    Returns:
        list[str]: decoded translations, one per input string, in order.
    """
    # Tokenize the whole batch at once; pad to the longest sequence and
    # truncate anything past the model's max length so an oversized input
    # line cannot crash generation.
    encoded = tokenizer(texts, return_tensors='pt', padding=True, truncation=True).to('cuda')
    # Inference only: disabling autograd avoids storing activations,
    # substantially reducing GPU memory use during generation.
    with torch.no_grad():
        # Force the decoder to start with the Chinese language token so
        # mBART generates into the correct target language.
        generated_tokens = model.generate(
            **encoded,
            forced_bos_token_id=tokenizer.lang_code_to_id["zh_CN"],
        )
    # Strip special tokens (language codes, EOS, padding) from the output.
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)

# Read the source corpus into memory; each element keeps its trailing
# newline, which is stripped before translation in the batch loop below.
with open('testB.nl', 'r', encoding='utf-8') as file:
    lines = file.readlines()

# Number of lines sent to the model per generate() call; tune to GPU memory.
batch_size = 10

# Progress bar sized to the total line count, advanced as lines finish.
total_lines = len(lines)
progress_bar = tqdm(total=total_lines, desc="Translating", unit='line')

# Translate the corpus batch by batch so GPU memory use stays bounded.
translated_lines = []
for i in range(0, len(lines), batch_size):
    batch_lines = lines[i:i+batch_size]
    batch_translated = translate_texts_batched([line.strip() for line in batch_lines])
    translated_lines.extend(batch_translated)
    # Bug fix: the original called update(batch_size) once per translated
    # *line*, advancing the bar batch_size**2 per batch (a 10x over-count)
    # and over-counting a short final batch. Advance once per batch by the
    # actual number of lines translated.
    progress_bar.update(len(batch_translated))

# Close the progress bar
progress_bar.close()

# Persist the translations, one per line, preserving the input order.
with open('testB.txt', 'w', encoding='utf-8') as out_file:
    out_file.writelines(f"{text}\n" for text in translated_lines)

print("Translation completed and saved")
