# %%
import os

import pandas as pd
import re


# Text-cleaning helper
def clean_text(text):
    """Strip punctuation/symbols and collapse runs of whitespace.

    Removes every character that is neither a word character nor whitespace
    (the regex is Unicode-aware, so CJK characters survive), then normalizes
    all remaining whitespace to single spaces.
    """
    no_symbols = re.sub(r'[^\w\s]', '', text)
    return ' '.join(no_symbols.split())


# Read and clean the parallel corpus
def read_and_clean(file_path):
    """Load a tab-separated zh/nl parallel corpus and clean both sides.

    Each line is expected to hold exactly two tab-separated columns
    (Chinese, then Dutch); lines with any other column count are silently
    skipped. Returns a DataFrame with an 'id' column (0-based line index)
    and a 'translation' column holding {'zh': ..., 'nl': ...} dicts.
    """
    records = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for line_no, raw_line in enumerate(fh):
            parts = raw_line.strip().split('\t')
            if len(parts) != 2:
                continue  # malformed row: wrong number of columns
            zh_text = clean_text(parts[0])
            nl_text = clean_text(parts[1])
            records.append(
                {'id': line_no, 'translation': {'zh': zh_text, 'nl': nl_text}}
            )
    return pd.DataFrame(records)


# %%
from datasets import Dataset, DatasetDict

# Load and clean both splits (paths are relative to the working directory).
train_df = read_and_clean('train/pair/train.nl-zh')
dev_df = read_and_clean('dev/dev.nl-zh')

# Convert the cleaned DataFrames into Hugging Face Dataset objects.
train_dataset = Dataset.from_pandas(train_df)
dev_dataset = Dataset.from_pandas(dev_df)

# Bundle both splits into a single DatasetDict.
datasets_dict = DatasetDict({
    'train': train_dataset,
    'dev': dev_dataset
})
# %%
# datasets_dict
# %%
# datasets_dict['train'][0]['translation']
# %%
from transformers import AutoTokenizer

# Fine-tuned mBART checkpoint; tokenizer and model are loaded from the same path.
model_path = 'mbart-large-cc25-finetuned/checkpoint-422500'
# NOTE(review): `return_tensors` is normally a per-call tokenization argument,
# not a `from_pretrained` option — presumably ignored here; confirm and drop.
tokenizer = AutoTokenizer.from_pretrained(model_path, return_tensors='pt')
# mBART language codes: Dutch as the source language, simplified Chinese as target.
tokenizer.src_lang = 'nl_XX'
tokenizer.tgt_lang = 'zh_CN'
# %%
# Token sequences longer than this are truncated.
max_length = 128


def preprocess_function(examples):
    """Tokenize a batch: Dutch sentences as inputs, Chinese sentences as labels."""
    pairs = examples['translation']
    source_texts = [pair['nl'] for pair in pairs]
    target_texts = [pair['zh'] for pair in pairs]
    return tokenizer(
        source_texts,
        text_target=target_texts,
        max_length=max_length,
        truncation=True,
    )


# %%
# Tokenize every split; drop the raw columns so only model input fields remain.
tokenized_datasets = datasets_dict.map(
    preprocess_function,
    batched=True,
    remove_columns=datasets_dict['train'].column_names,
)
# %%
from transformers import AutoModelForSeq2SeqLM

# Load the seq2seq model from the same checkpoint and move it to the GPU
# (requires CUDA; fp16 training below depends on it).
model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to('cuda')
# %%
from transformers import DataCollatorForSeq2Seq

# Dynamically pads inputs and labels per batch; label padding uses -100
# (which compute_metrics maps back to the pad token before decoding).
data_collator = DataCollatorForSeq2Seq(model=model, tokenizer=tokenizer)
# %%
# Point Hub downloads at the mirror. FIX: huggingface_hub resolves HF_ENDPOINT
# once at import time, so the original assignment (made AFTER `import evaluate`)
# had no effect. Set it before the import here as a best effort — ideally it
# belongs at the very top of the file, before the transformers/datasets imports.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

import evaluate

# SacreBLEU: corpus-level BLEU with standardized tokenization.
metric = evaluate.load("sacrebleu")
# %%
import numpy as np


def compute_metrics(eval_preds):
    """Decode generated ids and reference labels, then score with SacreBLEU.

    Returns a dict with a single 'bleu' entry (corpus BLEU score).
    """
    predictions, labels = eval_preds
    # Some models return (logits, ...) tuples; keep only the predictions.
    if isinstance(predictions, tuple):
        predictions = predictions[0]

    pred_texts = tokenizer.batch_decode(predictions, skip_special_tokens=True)

    # -100 marks label padding and cannot be decoded; map it back to the
    # tokenizer's pad token first.
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    label_texts = tokenizer.batch_decode(labels, skip_special_tokens=True)

    # sacrebleu expects one list of references per prediction.
    pred_texts = [text.strip() for text in pred_texts]
    reference_lists = [[text.strip()] for text in label_texts]

    scores = metric.compute(predictions=pred_texts, references=reference_lists)
    return {"bleu": scores["score"]}


# %%
from transformers import Seq2SeqTrainingArguments

batch_size = 16
# Aim for ~10 log entries per epoch.
# BUG FIX: the original used len(datasets_dict), which is the number of
# splits (2), not the number of training examples — yielding a fractional
# logging_steps (0.0125). Use the train-split size and clamp to an int >= 1.
steps_per_epoch = len(datasets_dict['train']) / batch_size
logging_steps = max(1, int(steps_per_epoch / 10))

args = Seq2SeqTrainingArguments(
    output_dir='mbart_nl_zh',
    # NOTE(review): evaluation is disabled even though an eval_dataset and
    # compute_metrics are wired into the trainer — confirm this is intended.
    eval_strategy="no",
    save_strategy="epoch",
    learning_rate=1e-4,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=8,
    weight_decay=0.01,
    save_total_limit=3,          # keep only the 3 most recent checkpoints
    num_train_epochs=3,
    predict_with_generate=True,  # use generate() for evaluation/prediction
    fp16=True,                   # mixed precision; requires CUDA
    logging_dir='logs',
    logging_steps=logging_steps,
)
# %%
from transformers import Seq2SeqTrainer

# Assemble the trainer from the pieces built above.
# NOTE(review): newer transformers versions deprecate `tokenizer=` in favor of
# `processing_class=` — confirm against the installed version.
trainer = Seq2SeqTrainer(
    args=args,
    model=model,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['dev'],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
    data_collator=data_collator,
)
print('----开始训练-----')
# %%
# Resume from the latest checkpoint in output_dir (raises if none exists).
trainer.train(resume_from_checkpoint=True)

# %%
