{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:01.883270Z",
     "start_time": "2024-08-11T03:20:01.071577400Z"
    }
   },
   "source": [
     "import pandas as pd\n",
     "import re\n",
     "\n",
     "# Text-cleaning helper\n",
     "def clean_text(text):\n",
     "    \"\"\"Strip punctuation/special symbols and collapse whitespace runs.\"\"\"\n",
     "    text = re.sub(r'[^\\w\\s]', '', text)  # keep only word chars and whitespace\n",
     "    text = ' '.join(text.split())  # collapse repeated whitespace to single spaces\n",
     "    return text\n",
     "\n",
     "# Read a tab-separated parallel corpus and clean both sides\n",
     "def read_and_clean(file_path):\n",
     "    \"\"\"Load a TSV file of sentence pairs into a DataFrame.\n",
     "\n",
     "    Assumes column order is zh<TAB>nl — TODO confirm against the data file\n",
     "    (files are named *.nl-zh). Lines without exactly two tab-separated\n",
     "    columns are skipped, so 'id' values may have gaps.\n",
     "    \"\"\"\n",
     "    cleaned_rows = []\n",
     "    with open(file_path, 'r', encoding='utf-8') as file:\n",
     "        for idx, line in enumerate(file):\n",
     "            columns = line.strip().split('\\t')\n",
     "            if len(columns) == 2:\n",
     "                zh, nl = columns\n",
     "                zh = clean_text(zh)\n",
     "                nl = clean_text(nl)\n",
     "                cleaned_rows.append({'id': idx, 'translation': {'zh': zh, 'nl': nl}})\n",
     "    \n",
     "    # Build the DataFrame once from the accumulated records\n",
     "    df = pd.DataFrame(cleaned_rows)\n",
     "    return df"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
     "from datasets import Dataset, DatasetDict\n",
     "\n",
     "# Load and clean both corpus splits (paths are relative to the notebook)\n",
     "train_df = read_and_clean('train/pair/train.nl-zh')\n",
     "dev_df = read_and_clean('dev/dev.nl-zh')\n",
     "\n",
     "# Wrap the DataFrames as Hugging Face Dataset objects\n",
     "train_dataset = Dataset.from_pandas(train_df)\n",
     "dev_dataset = Dataset.from_pandas(dev_df)\n",
     "\n",
     "# Bundle the two splits into a single DatasetDict\n",
     "datasets_dict = DatasetDict({\n",
     "    'train': train_dataset,\n",
     "    'dev': dev_dataset\n",
     "})"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:06.819844700Z",
     "start_time": "2024-08-11T03:20:01.887788200Z"
    }
   },
   "id": "c82bb5f21edb976a",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "source": [
     "# Quick look at the split names and row counts\n",
     "datasets_dict"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:06.837394100Z",
     "start_time": "2024-08-11T03:20:06.828362700Z"
    }
   },
   "id": "179816945ad7d857",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "source": [
     "# Sanity-check one cleaned (zh, nl) sentence pair\n",
     "datasets_dict['train'][0]['translation']"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:06.897558100Z",
     "start_time": "2024-08-11T03:20:06.841391100Z"
    }
   },
   "id": "bc1f68d0e55f004d",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "from transformers import AutoTokenizer\n",
    "\n",
    "model_path='mbart-large-cc25'\n",
    "tokenizer=AutoTokenizer.from_pretrained(model_path,return_tensors='pt')\n",
    "tokenizer.src_lang='nl_XX'\n",
    "tokenizer.tgt_lang='zh_CN'"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:09.341160400Z",
     "start_time": "2024-08-11T03:20:06.854440700Z"
    }
   },
   "id": "57a711017de4fc8b",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "source": [
    "max_length=128\n",
    "\n",
    "def preprocess_function(examples):\n",
    "    inputs=[ex['nl'] for ex in examples['translation']]\n",
    "    targets=[ex['zh'] for ex in examples['translation']]\n",
    "    model_inputs=tokenizer(\n",
    "        inputs,\n",
    "        text_target=targets,\n",
    "        max_length=max_length,\n",
    "        truncation=True,\n",
    "    )\n",
    "    return model_inputs"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:20:09.356907200Z",
     "start_time": "2024-08-11T03:20:09.346688900Z"
    }
   },
   "id": "63b567020cefdfb8",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "source": [
     "# Tokenize both splits; drop the raw columns so only model inputs remain\n",
     "tokenized_datasets=datasets_dict.map(\n",
     "    preprocess_function,\n",
     "    batched=True,\n",
     "    remove_columns=datasets_dict['train'].column_names,\n",
     ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:23:28.254226100Z",
     "start_time": "2024-08-11T03:23:13.337538900Z"
    }
   },
   "id": "14b45f56247b46d7",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "source": [
    "from transformers import AutoModelForSeq2SeqLM\n",
    "\n",
    "model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to('cuda')"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:24:16.688733900Z",
     "start_time": "2024-08-11T03:24:11.570375100Z"
    }
   },
   "id": "375adf5d4d4eeb9",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "source": [
     "from transformers import DataCollatorForSeq2Seq\n",
     "\n",
     "# Dynamically pads input ids and labels to the longest sequence per batch\n",
     "data_collator=DataCollatorForSeq2Seq(model=model,tokenizer=tokenizer)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:25:14.107000200Z",
     "start_time": "2024-08-11T03:25:14.080299500Z"
    }
   },
   "id": "cb7bbc3c713aebe0",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "source": [
     "import evaluate\n",
     "\n",
     "# SacreBLEU: corpus-level BLEU used to score translations\n",
     "metric = evaluate.load(\"sacrebleu\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:28:09.197312400Z",
     "start_time": "2024-08-11T03:25:57.866657600Z"
    }
   },
   "id": "657e0610ea2e8cf1",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "source": [
     "import numpy as np\n",
     "\n",
     "\n",
     "def compute_metrics(eval_preds):\n",
     "    \"\"\"Decode generated ids and reference labels, then score with SacreBLEU.\n",
     "\n",
     "    Returns a dict with the corpus BLEU score under the key 'bleu'.\n",
     "    \"\"\"\n",
     "    preds, labels = eval_preds\n",
     "    # In case the model returns more than the prediction logits\n",
     "    if isinstance(preds, tuple):\n",
     "        preds = preds[0]\n",
     "\n",
     "    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n",
     "\n",
     "    # Replace -100s in the labels as we can't decode them\n",
     "    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n",
     "    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n",
     "\n",
     "    # Some simple post-processing\n",
     "    decoded_preds = [pred.strip() for pred in decoded_preds]\n",
     "    # sacrebleu expects a list of reference lists per prediction\n",
     "    decoded_labels = [[label.strip()] for label in decoded_labels]\n",
     "\n",
     "    result = metric.compute(predictions=decoded_preds, references=decoded_labels)\n",
     "    return {\"bleu\": result[\"score\"]}"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:28:09.213985200Z",
     "start_time": "2024-08-11T03:28:09.203459900Z"
    }
   },
   "id": "cecc7690a5a505bd",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "source": [
     "from transformers import Seq2SeqTrainingArguments\n",
     "\n",
     "# NOTE(review): eval_strategy='no' means the dev set, compute_metrics and\n",
     "# predict_with_generate below are never exercised during training; consider\n",
     "# 'epoch' to track BLEU on dev after each epoch.\n",
     "args=Seq2SeqTrainingArguments(\n",
     "    output_dir = 'mbart_nl_zh',\n",
     "    eval_strategy=\"no\",\n",
     "    save_strategy=\"epoch\",  # checkpoint once per epoch\n",
     "    learning_rate=2e-5,\n",
     "    per_device_train_batch_size=8,\n",
     "    per_device_eval_batch_size=16,\n",
     "    weight_decay=0.01,\n",
     "    save_total_limit=3,  # keep only the 3 newest checkpoints\n",
     "    num_train_epochs=3,\n",
     "    predict_with_generate=True,  # use generate() for eval predictions\n",
     "    fp16=True,  # mixed precision; requires a CUDA GPU\n",
     "    logging_dir = 'logs',\n",
     "    logging_steps = 100,\n",
     ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:33:44.674569500Z",
     "start_time": "2024-08-11T03:33:44.579433200Z"
    }
   },
   "id": "7cd8c650ce0f9a9b",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "source": [
     "from transformers import Seq2SeqTrainer\n",
     "\n",
     "# Assemble the trainer from the pieces built above.\n",
     "# NOTE(review): newer transformers releases deprecate the `tokenizer=` kwarg\n",
     "# in favor of `processing_class=` — confirm against the installed version.\n",
     "trainer=Seq2SeqTrainer(\n",
     "    args=args,\n",
     "    model=model,\n",
     "    train_dataset=tokenized_datasets['train'],\n",
     "    eval_dataset=tokenized_datasets['dev'],\n",
     "    tokenizer=tokenizer,\n",
     "    compute_metrics=compute_metrics,\n",
     "    data_collator=data_collator,\n",
     ")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:33:46.192286600Z",
     "start_time": "2024-08-11T03:33:46.162736Z"
    }
   },
   "id": "64fb48d4da1d6315",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "source": [
     "# Fine-tune; checkpoints are written under 'mbart_nl_zh' each epoch\n",
     "trainer.train()"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T03:33:48.023849800Z",
     "start_time": "2024-08-11T03:33:46.985893900Z"
    }
   },
   "id": "4ad7cfb3c10b2260",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "translator = pipeline('translation',model='mbart-large-cc25')\n",
    "\n",
    "translator('hello')"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2024-08-11T09:03:29.811217200Z",
     "start_time": "2024-08-11T08:56:57.432821400Z"
    }
   },
   "id": "968bfb51f19fc643",
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [],
   "metadata": {
    "collapsed": false
   },
   "id": "b0602218674da15c",
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
