import os

import torch

# Silence HF tokenizers' fork-parallelism warning before transformers is imported.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

from transformers import AutoModelForMaskedLM, AutoTokenizer, Trainer, TrainingArguments, \
    DataCollatorForLanguageModeling, default_data_collator
import pandas as pd
import re, os
from datasets import Dataset

# Fall back to CPU when CUDA is unavailable; the original hard-coded 'cuda'
# and crashed on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load model and tokenizer.
# NOTE(review): mbart-large-cc25 is a seq2seq checkpoint; loading it through
# AutoModelForMaskedLM attaches an MLM head — confirm this is intentional.
model_checkpoint = "mbart-large-cc25"
model = AutoModelForMaskedLM.from_pretrained(model_checkpoint).to(device)
# `return_tensors` is a per-call tokenizer option, not a from_pretrained()
# option; the ineffective kwarg was removed.
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)


# Text-cleaning helper
def clean_text(text):
    """Drop punctuation/symbol characters and collapse all whitespace runs
    into single spaces, trimming the ends."""
    without_punct = re.sub(r'[^\w\s]', '', text)
    return ' '.join(without_punct.split())


# Read and clean the data
def read_and_clean(file_paths):
    """Read every file ending in 'nl' or 'zh' under the directory
    `file_paths` and return a one-column DataFrame ('text') of cleaned lines.

    All matching files are flattened into a single corpus; other files are
    skipped without being opened.  NOTE: os.listdir order is arbitrary, so
    row order is not stable across filesystems.
    """
    cleaned_rows = []
    for name in os.listdir(file_paths):
        # The original had two byte-identical branches for 'nl' and 'zh';
        # both sides get the same cleaning, so they are merged here.
        if not name.endswith(('nl', 'zh')):
            continue
        file_path = os.path.join(file_paths, name)
        with open(file_path, 'r', encoding='utf-8') as file:
            for line in file:
                cleaned_rows.append({'text': clean_text(line.strip())})
    return pd.DataFrame(cleaned_rows)


# Load and clean the data
train_df = read_and_clean('train/mono')
raw_datasets = Dataset.from_pandas(train_df)

# NOTE(review): `limited_dataset` is computed but never used — the split below
# runs on the full `raw_datasets`.  Either delete this line or pass
# `limited_dataset` to train_test_split if a 10k-sample run was intended.
limited_dataset = raw_datasets.shuffle(seed=20).select(range(10000))

# Deterministic 90/10 train/test split
split_datasets = raw_datasets.train_test_split(train_size=0.9, seed=20)


# %%

# Tokenization function
def tokenize_function(examples):
    """Tokenize a batch of raw strings; when a fast tokenizer is in use,
    also keep per-example word ids (needed for whole-word masking)."""
    encoded = tokenizer(examples["text"])
    if tokenizer.is_fast:
        encoded["word_ids"] = [
            encoded.word_ids(batch_index)
            for batch_index in range(len(encoded["input_ids"]))
        ]
    return encoded


# Tokenize the dataset (the raw 'text' column is dropped after encoding)
tokenized_datasets = split_datasets.map(tokenize_function, batched=True, remove_columns=["text"])

# Fixed chunk length in tokens, consumed by group_texts below
chunk_size = 128


# %%
def group_texts(examples, block_size=None):
    """Concatenate every feature across the batch and re-split the stream
    into fixed-length chunks for masked-LM training.

    Args:
        examples: batched feature dict mapping keys to lists of token lists.
        block_size: chunk length; defaults to the module-level `chunk_size`
            (128), so existing `map(group_texts, ...)` callers are unchanged.

    Returns:
        Dict with the same keys re-chunked, plus a `labels` copy of
        `input_ids` (the data collator masks the inputs; labels keep the
        original token ids).
    """
    if block_size is None:
        block_size = chunk_size  # module-level constant
    # Concatenate all texts feature-wise
    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    # Length of the concatenated stream (all features share the same length)
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    # Drop the trailing remainder smaller than block_size
    total_length = (total_length // block_size) * block_size
    # Split into chunks of block_size
    result = {
        k: [t[i: i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated_examples.items()
    }
    # Labels mirror the (unmasked) inputs; masking happens in the collator
    result["labels"] = result["input_ids"].copy()
    return result


# Chunked dataset
lm_datasets = tokenized_datasets.map(group_texts, batched=True)

# Data collator: randomly masks 15% of tokens on the fly for each batch
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

# Training hyper-parameters
batch_size = 16
# Log once per epoch: number of optimizer steps over the training split
logging_steps = len(split_datasets["train"]) // batch_size
model_name = model_checkpoint.split("/")[-1]
training_args = TrainingArguments(
    output_dir=f"{model_name}-finetuned",
    overwrite_output_dir=True,
    eval_strategy="epoch",
    learning_rate=2e-5,
    weight_decay=0.01,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    # fp16 mixed precision requires CUDA; enabling it unconditionally
    # crashes on CPU-only machines, so gate it on GPU availability.
    fp16=torch.cuda.is_available(),
    logging_steps=logging_steps,
    num_train_epochs=6,
    save_steps=10000,
)

# Trainer: wires together the model, the chunked splits, the masking
# collator, and the hyper-parameters defined above.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=lm_datasets["train"],
    eval_dataset=lm_datasets["test"],
    data_collator=data_collator,
    tokenizer=tokenizer,
)

# Resume from the latest checkpoint if one exists, otherwise start fresh.
# (Passing resume_from_checkpoint=True unconditionally raises ValueError on
# the very first run, when the output dir contains no checkpoint yet.)
from transformers.trainer_utils import get_last_checkpoint

last_checkpoint = None
if os.path.isdir(training_args.output_dir):
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
print('----开始训练-----')
trainer.train(resume_from_checkpoint=last_checkpoint)

# Model evaluation: perplexity = exp(mean eval cross-entropy loss)
import math

eval_results = trainer.evaluate()
print(f">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
