from transformers import MarianMTModel, MarianTokenizer, Seq2SeqTrainingArguments, Seq2SeqTrainer
from datasets import load_dataset, Dataset
import torch


# Pick the compute device: GPU when available, CPU otherwise.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Configure the Hugging Face mirror beforehand if needed: https://hf-mirror.com/
# Download the pretrained German->English MarianMT checkpoint and its tokenizer.
model_name = 'Helsinki-NLP/opus-mt-de-en'
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# Toy parallel corpus: (German source, English target) pairs.
_pairs = [
    ('ich mochte ein bier', 'i want a beer'),
    ('du trinkst einen kaffee', 'you drink a coffee'),
    ('er isst einen apfel', 'he eats an apple'),
    ('sie liest ein buch', 'she reads a book'),
    ('wir sehen einen film', 'we watch a movie'),
    ('ihr spielt ein spiel', 'you play a game'),
    ('sie schreiben einen brief', 'they write a letter'),
    ('ich fahre ein auto', 'i drive a car'),
    ('du kaufst ein haus', 'you buy a house'),
    ('er malt ein bild', 'he paints a picture'),
    ('ich esse einen apfel', 'i eat an apple'),
    ('du liest ein buch', 'you read a book'),
    ('wir schreiben einen brief', 'we write a letter'),
    ('er trinkt wasser', 'he drinks water'),
    ('sie spielt klavier', 'she plays piano'),
]
sentences = [{'src': de, 'tgt': en} for de, en in _pairs]

# Wrap the pairs in a `datasets.Dataset` with a single 'translation' column.
dataset = Dataset.from_dict({
    'translation': [{'de': de, 'en': en} for de, en in _pairs]
})

# Batch tokenization for seq2seq fine-tuning.
def preprocess_function(examples):
    """Tokenize a batch of translation examples.

    Args:
        examples: batch dict with a 'translation' column whose entries are
            {'de': source_text, 'en': target_text} mappings.

    Returns:
        Model inputs ('input_ids', 'attention_mask') padded/truncated to 128
        tokens, plus 'labels' where every pad position is set to -100 so the
        cross-entropy loss ignores padding.
    """
    inputs = [ex['de'] for ex in examples['translation']]
    targets = [ex['en'] for ex in examples['translation']]
    model_inputs = tokenizer(inputs, max_length=128, truncation=True, padding='max_length')
    # Tokenize targets in target-language mode.
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, truncation=True, padding='max_length')
    # Fix: with padding='max_length' the labels are full of pad-token ids;
    # left as-is they would contribute to the loss. Replace them with -100,
    # the index that torch's CrossEntropyLoss (and the HF Trainer) ignores.
    pad_id = tokenizer.pad_token_id
    model_inputs['labels'] = [
        [tok if tok != pad_id else -100 for tok in seq]
        for seq in labels['input_ids']
    ]
    return model_inputs

# Run the tokenization pass over the whole dataset, batch by batch.
tokenized_dataset = dataset.map(function=preprocess_function, batched=True)

# Fine-tuning hyper-parameters. The corpus is tiny, so 300 epochs are used
# deliberately to let the model memorize the training pairs for the demo.
training_args = Seq2SeqTrainingArguments(
    output_dir='./results',
    num_train_epochs=300,
    learning_rate=2e-5,
    weight_decay=0.01,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    evaluation_strategy='epoch',
    save_total_limit=3,
    predict_with_generate=True,
)

# Wire model, data and hyper-parameters into a seq2seq trainer; the same
# (tiny) tokenized dataset doubles as the evaluation split.
trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    tokenizer=tokenizer,
    train_dataset=tokenized_dataset,
    eval_dataset=tokenized_dataset,
)

# Fine-tune.
trainer.train()

# Sanity-check the fine-tuned model. The last sentence ('er trinkt bier')
# was not in the training set.
test_sentences = [
    'ich mochte ein bier',
    'du trinkst einen kaffee',
    'er isst einen apfel',
    'sie liest ein buch',
    'wir sehen einen film',
    'er trinkt bier'
]

# Fix: the Trainer leaves the model in train mode after trainer.train(),
# so dropout would stay active during generation and degrade the output.
# Switch to eval mode, make sure model and inputs share a device, and
# disable gradient tracking for inference.
model.to(device)
model.eval()
inputs = tokenizer(test_sentences, return_tensors='pt', padding=True, truncation=True).to(device)
with torch.no_grad():
    translated = model.generate(**inputs)
translated_texts = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]

# Print each source sentence next to its generated translation.
for src, tgt in zip(test_sentences, translated_texts):
    print(f'{src} -> {tgt}')