{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Readme\n",
    "将语法纠错看做是一个从错误句子翻译为正确句子的过程，利用先进的神经机器翻译模型进行解决。\n",
    "我们微调了大规模Seq2Seq预训练语言模型[中文BART](https://github.com/fastnlp/CPT)用于语法纠错任务。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save to python file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[NbConvertApp] Converting notebook seq2seq.ipynb to script\n",
      "[NbConvertApp] Writing 18271 bytes to seq2seq.py\n"
     ]
    }
   ],
   "source": [
    "!jupyter nbconvert --to script seq2seq.ipynb"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### About data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Dependence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/anaconda3/envs/seq2seq-913/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import argparse\n",
    "import json\n",
    "import logging\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "import nltk\n",
    "import torch\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from transformers import BartForConditionalGeneration, BertTokenizer\n",
    "from opencc import OpenCC\n",
    "import re\n",
    "import numpy as np\n",
    "import torch\n",
    "import transformers\n",
    "from transformers import (AutoConfig, AutoModel, BertTokenizer,BertForTokenClassification,DataCollatorForTokenClassification, HfArgumentParser,DataCollatorForSeq2Seq,Seq2SeqTrainer,Seq2SeqTrainingArguments, Trainer, TrainerCallback,AutoModelForSeq2SeqLM,BartForConditionalGeneration)\n",
    "from transformers.trainer_utils import is_main_process\n",
    "from datasets import Dataset\n",
    "# Data loading lib\n",
    "from dataclasses import dataclass, field\n",
    "from datasets import Dataset\n",
    "# Enhanced typing\n",
    "from typing import Optional\n",
    "# Hugging Face lib\n",
    "from transformers import TrainerCallback\n",
    "# Metric\n",
    "from rouge import Rouge "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_lines(file):\n",
    "    lines = []\n",
    "    for line in file:\n",
    "        line = line.rstrip(\"\\n\")\n",
    "        lines.append(line.replace(\" \", \"\"))\n",
    "    return lines\n",
    "\n",
    "def load_json(file_path):\n",
    "    results={'summarization':[],'article':[]}\n",
    "    with open(file_path,encoding='utf-8') as f:\n",
    "        content=json.load(f)\n",
    "        for sample in content:\n",
    "            results['summarization'].append(sample['summarization'])\n",
    "            results['article'].append(sample['article'])\n",
    "        results=Dataset.from_dict(results)\n",
    "    return results\n",
    "\n",
    "def convert_parallel_data_to_json_file(source_data_file, target_data_file, json_data_file):\n",
    "    json_list = []\n",
    "    with open(source_data_file, \"r\", encoding='utf-8') as f1:\n",
    "        with open(target_data_file, \"r\", encoding='utf-8') as f2:\n",
    "            src_lines, tgt_lines = read_lines(f1), read_lines(f2)\n",
    "            for s, t in zip(src_lines, tgt_lines):\n",
    "                if s != t:\n",
    "                    dic = {\"summarization\": t, \"article\": s}\n",
    "                    json_list.append(dic)   \n",
    "    json.dump(json_list, open(json_data_file, \"w\", encoding='utf-8'), ensure_ascii=False)\n",
    "\n",
    "def convert_input_data_to_json_file(input_data_file, json_data_file):\n",
    "    json_list = []\n",
    "    with open(input_data_file, \"r\", encoding='utf-8') as f:\n",
    "        src_lines = read_lines(f)\n",
    "        for s in src_lines:\n",
    "            dic = {\"summarization\": \"\", \"article\": s}\n",
    "            json_list.append(dic)   \n",
    "    json.dump(json_list, open(json_data_file, \"w\", encoding='utf-8'), ensure_ascii=False)\n",
    "\n",
    "def preprocess_function(examples):\n",
    "    # 获取输入文本，假设 text_column 是数据集中文本的列名\n",
    "    inputs = examples[text_column]\n",
    "    \n",
    "    # 获取目标摘要，假设 summary_column 是数据集中摘要的列名\n",
    "    targets = examples[summary_column]\n",
    "    \n",
    "    # 使用 tokenizer 对输入文本进行编码\n",
    "    # max_length 参数用于指定输入的最大长度\n",
    "    # padding 参数控制是否填充输入，使其达到相同的长度\n",
    "    # truncation 参数控制当输入文本长度超过 max_length 时，是否截断\n",
    "    model_inputs = tokenizer(inputs, max_length= 100, padding = False, truncation=True)\n",
    "\n",
    "    # 使用 tokenizer 对目标摘要进行编码\n",
    "    # 需要使用 as_target_tokenizer 来区分输入和目标的编码，确保目标的特殊处理\n",
    "    with tokenizer.as_target_tokenizer():\n",
    "        # 对目标摘要进行编码，设置最大长度、填充和截断方式\n",
    "        labels = tokenizer(targets, max_length= 100 , padding= False, truncation=True)\n",
    "\n",
    "    # 将编码后的摘要的 input_ids 作为标签存入 model_inputs 中\n",
    "    model_inputs[\"labels\"] = labels[\"input_ids\"]\n",
    "    \n",
    "    # 返回包含输入和标签的字典，供模型训练使用\n",
    "    return model_inputs\n",
    "    \n",
    "def postprocess_text(preds, labels):\n",
    "    preds = [pred.strip() for pred in preds]\n",
    "    labels = [label.strip() for label in labels]\n",
    "    while '' in preds:\n",
    "        idx=preds.index('')\n",
    "        preds[idx]='。'\n",
    "\n",
    "    return preds, labels\n",
    "\n",
    "def compute_metrics(eval_preds):\n",
    "    # try\n",
    "    rouge = Rouge()\n",
    "\n",
    "    preds, labels = eval_preds\n",
    "    if isinstance(preds, tuple):\n",
    "        preds = preds[0]\n",
    "    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n",
    "    ignore_pad_token_for_loss = True\n",
    "    if ignore_pad_token_for_loss:\n",
    "        # Replace -100 in the labels as we can't decode them.\n",
    "        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n",
    "    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n",
    "   \n",
    "    # Some simple post-processing\n",
    "    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n",
    "    scores = rouge.get_scores(decoded_preds, decoded_labels,avg=True)\n",
    "    for key in scores:\n",
    "        scores[key]=scores[key]['f']*100\n",
    "\n",
    "    result=scores\n",
    "\n",
    "    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n",
    "    result[\"gen_len\"] = np.mean(prediction_lens)\n",
    "    result = {k: round(v, 4) for k, v in result.items()}\n",
    "    return result\n",
    "\n",
    "def split_data_file(input_file, src_output_file, tgt_output_file):\n",
    "    \"\"\"这个函数用来分割一种特定的txt文本数据来得到两个分开的病句和改正的情况数据\n",
    "\n",
    "    Args:\n",
    "        input_file (_type_): _description_\n",
    "        src_output_file (_type_): _description_\n",
    "        tgt_output_file (_type_): _description_\n",
    "    \"\"\"\n",
    "    with open(input_file, 'r', encoding='utf-8') as infile, \\\n",
    "         open(src_output_file, 'w', encoding='utf-8') as src_outfile, \\\n",
    "         open(tgt_output_file, 'w', encoding='utf-8') as tgt_outfile:\n",
    "        \n",
    "        for line in infile:\n",
    "            # 拆分每行的数据，假设用制表符（\\t）分隔\n",
    "            parts = line.strip().split(\"\\t\")\n",
    "            if len(parts) == 3:\n",
    "                index, src_sentence, tgt_sentence = parts\n",
    "                src_outfile.write(src_sentence + '\\n')\n",
    "                tgt_outfile.write(tgt_sentence + '\\n')\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Unit Test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "' \\n这种方式还是有问题.\\n'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Create Data json file\n",
    "def TEST_convert_parallel_data_to_json_file():\n",
    "    source_data_file = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/data/MuCGEC/MuCGEC_dev.txt\"\n",
    "    json_data_file   = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/data/valid.json\"\n",
    "    convert_input_data_to_json_file(source_data_file,json_data_file)\n",
    "# TEST_convert_parallel_data_to_json_file()\n",
    "\"\"\" \n",
    "这种方式还是有问题.\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据分割\n",
    "# 使用这个函数进行文件的分割\n",
    "input_file = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/data/MuCGEC/example_pred_dev.txt\"  \n",
    "src_output_file = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/data/MuCGEC_CGED_Dev_src.txt\"  \n",
    "tgt_output_file = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/data/MuCGEC_CGED_Dev_tgt.txt\" \n",
    "\n",
    "split_data_file(input_file, src_output_file, tgt_output_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 调用你现有的函数进行JSON生成\n",
    "convert_parallel_data_to_json_file(src_output_file, tgt_output_file, \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/data/train.json\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data preprocessing"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### Model Definition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def split_sentence(document: str, flag: str = \"all\", limit: int = 510):\n",
    "    \"\"\"\n",
    "    Args:\n",
    "        document:\n",
    "        flag: Type:str, \"all\" 中英文标点分句，\"zh\" 中文标点分句，\"en\" 英文标点分句\n",
    "        limit: 默认单句最大长度为510个字符\n",
    "    Returns: Type:list\n",
    "    \"\"\"\n",
    "    sent_list = []\n",
    "    try:\n",
    "        if flag == \"zh\":\n",
    "            document = re.sub('(?P<quotation_mark>([。？！](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>([。？！])[”’\"\\'])', r'\\g<quotation_mark>\\n', document)  # 特殊引号\n",
    "        elif flag == \"en\":\n",
    "            document = re.sub('(?P<quotation_mark>([.?!](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 英文单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>([?!.][\"\\']))', r'\\g<quotation_mark>\\n', document)  # 特殊引号\n",
    "        else:\n",
    "            document = re.sub('(?P<quotation_mark>([。？！….?!](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>(([。？！.!?]|…{1,2})[”’\"\\']))', r'\\g<quotation_mark>\\n',\n",
    "                              document)  # 特殊引号\n",
    "\n",
    "        sent_list_ori = document.splitlines()\n",
    "        for sent in sent_list_ori:\n",
    "            sent = sent.strip()\n",
    "            if not sent:\n",
    "                continue\n",
    "            else:\n",
    "                while len(sent) > limit:\n",
    "                    temp = sent[0:limit]\n",
    "                    sent_list.append(temp)\n",
    "                    sent = sent[limit:]\n",
    "                sent_list.append(sent)\n",
    "    except:\n",
    "        sent_list.clear()\n",
    "        sent_list.append(document)\n",
    "    return sent_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_model(sents):\n",
    "    # 设置生成的返回序列数量\n",
    "    num_ret_seqs = 1\n",
    "    # 设置beam search的宽度\n",
    "    beam = 5\n",
    "    # 输入序列的最大长度\n",
    "    inp_max_len = 100\n",
    "\n",
    "    # 对输入句子列表进行编码，生成token化后的输入，padding为最大长度，输入序列长度限制为inp_max_len\n",
    "    batch = [tokenizer(s, return_tensors='pt', padding='max_length', max_length=inp_max_len) for s in sents]\n",
    "    \n",
    "    # original index to final batch index, 保存原始句子的索引和最终batch中句子的对应关系\n",
    "    oidx2bidx = {}\n",
    "    \n",
    "    # 用于存储符合长度限制的句子\n",
    "    final_batch = []\n",
    "    \n",
    "    # 遍历batch中的每个元素\n",
    "    for oidx, elm in enumerate(batch):\n",
    "        # 检查每个输入的长度是否小于等于最大长度限制inp_max_len\n",
    "        if elm['input_ids'].size(1) <= inp_max_len:\n",
    "            # 保存原始索引和batch索引之间的映射\n",
    "            oidx2bidx[oidx] = len(final_batch)\n",
    "            # 将符合长度要求的句子加入到final_batch中\n",
    "            final_batch.append(elm)\n",
    "\n",
    "    # 将batch中的每个部分（如input_ids和attention_mask）按行拼接\n",
    "    batch = {key: torch.cat([elm[key] for elm in final_batch], dim=0) for key in final_batch[0]}\n",
    "    \n",
    "    # 关闭梯度计算，因为我们只进行推理，不需要计算梯度\n",
    "    with torch.no_grad():\n",
    "        # 使用模型生成输出，指定输入id和attention mask，并应用beam search进行推理\n",
    "        generated_ids = model.generate(\n",
    "            batch['input_ids'].cuda(),  # 将input_ids放到GPU上\n",
    "            attention_mask=batch['attention_mask'].cuda(),  # 将attention_mask放到GPU上\n",
    "            num_beams=beam,  # beam search的宽度\n",
    "            num_return_sequences=num_ret_seqs,  # 每个输入返回多少个生成的序列\n",
    "            max_length=inp_max_len  # 生成的最大长度\n",
    "        )\n",
    "    \n",
    "    # 将生成的id转换为人类可读的文本，并跳过特殊token\n",
    "    _out = tokenizer.batch_decode(generated_ids.detach().cpu(), skip_special_tokens=True)\n",
    "    \n",
    "    # 初始化输出列表\n",
    "    outs = []\n",
    "    \n",
    "    # 根据返回的生成序列数，整理输出\n",
    "    for i in range(0, len(_out), num_ret_seqs):\n",
    "        outs.append(_out[i:i+num_ret_seqs])\n",
    "    \n",
    "    # 根据原始输入句子的顺序，构造最终的输出\n",
    "    final_outs = [\n",
    "        [sents[oidx]] if oidx not in oidx2bidx else outs[oidx2bidx[oidx]]  # 如果句子未被处理，返回原始句子；否则返回生成结果\n",
    "        for oidx in range(len(sents))\n",
    "    ]\n",
    "    \n",
    "    # 返回最终的生成结果\n",
    "    return final_outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict():\n",
    "    sents = [l.strip() for l in open(args.input_path)]  # 分句\n",
    "    subsents = []\n",
    "    s_map = []\n",
    "    for i, sent in enumerate(sents):  # 将篇章划分为子句，分句预测再合并\n",
    "        subsent_list = split_sentence(sent, \"zh\")\n",
    "        s_map.extend([i for _ in range(len(subsent_list))])\n",
    "        subsents.extend(subsent_list)\n",
    "    assert len(subsents) == len(s_map)\n",
    "    b_size = args.batch_size\n",
    "    outs = []\n",
    "    for j in tqdm(range(0, len(subsents), b_size)):\n",
    "        sents_batch = subsents[j:j+b_size]\n",
    "        outs_batch = run_model(sents_batch)\n",
    "        for sent, preds in zip(sents_batch, outs_batch):\n",
    "            outs.append({'src': sent, 'preds': preds})\n",
    "    results = [\"\" for _ in range(len(sents))]\n",
    "    with open(args.output_path, 'w') as outf:\n",
    "        for i, out in enumerate(outs):\n",
    "            results[s_map[i]] += cc.convert(out['preds'][0])\n",
    "        for res in results:\n",
    "            outf.write(res + \"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Variable Initialization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 模型随机种子\n",
    "SEED = 2024\n",
    "# Hugging Face 预训练模型仓库Page\n",
    "PRETRAIN_MODEL = \"fnlp/bart-large-chinese\"\n",
    "# 训练结果产物位置\n",
    "MODEL_DIR = \"./checkpoints/lang8+lang8_bart-large-chinese\"\n",
    "# 下游任务名称\n",
    "TASK_NAME = \"gec\"\n",
    "# 可用计算设备编号\n",
    "CUDA_DEVICE = \"0,1,2,3,4,5,6,7\"\n",
    "DATASET_NAME = \"gec\"\n",
    "SAVE_PATH = \"./checkpoints/\"\n",
    "SEED = 2024\n",
    "CHECKPOINT_SEED_PATH = SAVE_PATH + \"/run_\" + str(SEED)\n",
    "LENGTH_MAP = {\"lcsts\": \"30\", \"csl\": \"50\", \"adgen\": \"128\", \"gec\": \"100\"}\n",
    "VAL_MAX_TARGET_LENGTH_GEC = 100 \n",
    "TRAIN_FILE_PATH = \"/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/data/train.json\"\n",
    "datasets={}\n",
    "data_files = {}\n",
    "data_files[\"train\"] = TRAIN_FILE_PATH\n",
    "for key in data_files:\n",
    "    datasets[key]=load_json(data_files[key])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Training Phase"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "text_column = \"article\"\n",
    "summary_column = \"summarization\"\n",
    "column_names = datasets[\"train\"].column_names\n",
    "max_target_length = VAL_MAX_TARGET_LENGTH_GEC\n",
    "padding = False\n",
    "train_dataset = datasets[\"train\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting tokenizing...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/anaconda3/envs/seq2seq-913/lib/python3.8/site-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "print(\"Starting tokenizing...\")\n",
    "tokenizer = BertTokenizer.from_pretrained(PRETRAIN_MODEL)\n",
    "#tokenizer = BertTokenizer.from_pretrained(PRETRAIN_MODEL, force_download=True)\n",
    "#tokenizer = BertTokenizer.from_pretrained(PRETRAIN_MODEL, cache_dir='/root/MyCode/Chinese_Seq2Edit/MuCGEC/models/pretrained')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# LOCAL_DOWNLOADED_MODAL = \"/root/.cache/huggingface/hub/models--fnlp--bart-large-chinese\"\n",
    "model = BartForConditionalGeneration.from_pretrained(PRETRAIN_MODEL)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.config.max_length = VAL_MAX_TARGET_LENGTH_GEC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map:   0%|          | 0/990 [00:00<?, ? examples/s]/opt/anaconda3/envs/seq2seq-913/lib/python3.8/site-packages/transformers/tokenization_utils_base.py:4126: UserWarning: `as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.\n",
      "  warnings.warn(\n",
      "Map: 100%|██████████| 990/990 [00:00<00:00, 1301.76 examples/s]\n"
     ]
    }
   ],
   "source": [
    "train_dataset = train_dataset.map(\n",
    "    preprocess_function,\n",
    "    batched=True,\n",
    "    remove_columns=column_names\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "ignore_pad_token_for_loss = True\n",
    "fp16 = False\n",
    "# Data collator\n",
    "label_pad_token_id = (\n",
    "    -100 if ignore_pad_token_for_loss else tokenizer.pad_token_id\n",
    ")\n",
    "data_collator = DataCollatorForSeq2Seq(\n",
    "    tokenizer,\n",
    "    model=model,\n",
    "    label_pad_token_id=label_pad_token_id,\n",
    "    pad_to_multiple_of = 8 if fp16 else None,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Seq2SeqTrainingArguments(\n",
      "_n_gpu=2,\n",
      "accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},\n",
      "adafactor=False,\n",
      "adam_beta1=0.9,\n",
      "adam_beta2=0.999,\n",
      "adam_epsilon=1e-08,\n",
      "auto_find_batch_size=False,\n",
      "batch_eval_metrics=False,\n",
      "bf16=False,\n",
      "bf16_full_eval=False,\n",
      "data_seed=None,\n",
      "dataloader_drop_last=False,\n",
      "dataloader_num_workers=4,\n",
      "dataloader_persistent_workers=False,\n",
      "dataloader_pin_memory=True,\n",
      "dataloader_prefetch_factor=None,\n",
      "ddp_backend=None,\n",
      "ddp_broadcast_buffers=None,\n",
      "ddp_bucket_cap_mb=None,\n",
      "ddp_find_unused_parameters=None,\n",
      "ddp_timeout=1800,\n",
      "debug=[],\n",
      "deepspeed=None,\n",
      "disable_tqdm=False,\n",
      "dispatch_batches=None,\n",
      "do_eval=True,\n",
      "do_predict=False,\n",
      "do_train=True,\n",
      "eval_accumulation_steps=None,\n",
      "eval_delay=0,\n",
      "eval_do_concat_batches=True,\n",
      "eval_on_start=False,\n",
      "eval_steps=None,\n",
      "eval_strategy=epoch,\n",
      "eval_use_gather_object=False,\n",
      "evaluation_strategy=epoch,\n",
      "fp16=True,\n",
      "fp16_backend=auto,\n",
      "fp16_full_eval=False,\n",
      "fp16_opt_level=O1,\n",
      "fsdp=[],\n",
      "fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},\n",
      "fsdp_min_num_params=0,\n",
      "fsdp_transformer_layer_cls_to_wrap=None,\n",
      "full_determinism=False,\n",
      "generation_config=None,\n",
      "generation_max_length=None,\n",
      "generation_num_beams=None,\n",
      "gradient_accumulation_steps=4,\n",
      "gradient_checkpointing=False,\n",
      "gradient_checkpointing_kwargs=None,\n",
      "greater_is_better=True,\n",
      "group_by_length=True,\n",
      "half_precision_backend=auto,\n",
      "hub_always_push=False,\n",
      "hub_model_id=None,\n",
      "hub_private_repo=False,\n",
      "hub_strategy=every_save,\n",
      "hub_token=<HUB_TOKEN>,\n",
      "ignore_data_skip=False,\n",
      "include_inputs_for_metrics=False,\n",
      "include_num_input_tokens_seen=False,\n",
      "include_tokens_per_second=False,\n",
      "jit_mode_eval=False,\n",
      "label_names=None,\n",
      "label_smoothing_factor=0.0,\n",
      "learning_rate=3e-05,\n",
      "length_column_name=length,\n",
      "load_best_model_at_end=True,\n",
      "local_rank=0,\n",
      "log_level=passive,\n",
      "log_level_replica=warning,\n",
      "log_on_each_node=True,\n",
      "logging_dir=./logs,\n",
      "logging_first_step=False,\n",
      "logging_nan_inf_filter=True,\n",
      "logging_steps=500,\n",
      "logging_strategy=steps,\n",
      "lr_scheduler_kwargs={},\n",
      "lr_scheduler_type=polynomial,\n",
      "max_grad_norm=1.0,\n",
      "max_steps=-1,\n",
      "metric_for_best_model=accuracy,\n",
      "mp_parameters=,\n",
      "neftune_noise_alpha=None,\n",
      "no_cuda=False,\n",
      "num_train_epochs=10,\n",
      "optim=adamw_torch,\n",
      "optim_args=None,\n",
      "optim_target_modules=None,\n",
      "output_dir=./results,\n",
      "overwrite_output_dir=True,\n",
      "past_index=-1,\n",
      "per_device_eval_batch_size=32,\n",
      "per_device_train_batch_size=32,\n",
      "predict_with_generate=True,\n",
      "prediction_loss_only=False,\n",
      "push_to_hub=False,\n",
      "push_to_hub_model_id=None,\n",
      "push_to_hub_organization=None,\n",
      "push_to_hub_token=<PUSH_TO_HUB_TOKEN>,\n",
      "ray_scope=last,\n",
      "remove_unused_columns=True,\n",
      "report_to=[],\n",
      "restore_callback_states_from_checkpoint=False,\n",
      "resume_from_checkpoint=None,\n",
      "run_name=./results,\n",
      "save_on_each_node=False,\n",
      "save_only_model=False,\n",
      "save_safetensors=True,\n",
      "save_steps=500,\n",
      "save_strategy=epoch,\n",
      "save_total_limit=3,\n",
      "seed=42,\n",
      "skip_memory_metrics=True,\n",
      "sortish_sampler=False,\n",
      "split_batches=None,\n",
      "tf32=None,\n",
      "torch_compile=False,\n",
      "torch_compile_backend=None,\n",
      "torch_compile_mode=None,\n",
      "torch_empty_cache_steps=None,\n",
      "torchdynamo=None,\n",
      "tpu_metrics_debug=False,\n",
      "tpu_num_cores=None,\n",
      "use_cpu=False,\n",
      "use_ipex=False,\n",
      "use_legacy_prediction_loop=False,\n",
      "use_mps_device=False,\n",
      "warmup_ratio=0.0,\n",
      "warmup_steps=1000,\n",
      "weight_decay=0.0,\n",
      ")\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/anaconda3/envs/seq2seq-913/lib/python3.8/site-packages/transformers/training_args.py:1525: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead\n",
      "  warnings.warn(\n",
      "/opt/anaconda3/envs/seq2seq-913/lib/python3.8/site-packages/torch/cuda/__init__.py:128: UserWarning: CUDA initialization: The NVIDIA driver on your system is too old (found version 10020). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver. (Triggered internally at ../c10/cuda/CUDAFunctions.cpp:108.)\n",
      "  return torch._C._cuda_getDeviceCount() > 0\n"
     ]
    }
   ],
   "source": [
    "# 构造 Seq2SeqTrainingArguments\n",
    "training_args = Seq2SeqTrainingArguments(\n",
    "    output_dir='./results',                  # 保存模型和结果的路径\n",
    "    overwrite_output_dir=True,               # 是否覆盖已存在的输出目录\n",
    "    do_train=True,                           # 是否进行训练\n",
    "    do_eval=True,                            # 是否进行验证\n",
    "    per_device_train_batch_size=32,          # 每个设备的训练批大小\n",
    "    per_device_eval_batch_size=32,           # 每个设备的验证批大小\n",
    "    num_train_epochs=10,                     # 训练的总轮数\n",
    "    learning_rate=3e-5,                      # 学习率\n",
    "    evaluation_strategy=\"epoch\",             # 验证策略，可以是 'steps' 或 'epoch'\n",
    "    save_strategy=\"epoch\",                   # 保存模型策略，可以是 'steps' 或 'epoch'\n",
    "    save_total_limit=3,                      # 保存的模型数量上限\n",
    "    load_best_model_at_end=True,             # 是否在训练结束时加载最佳模型\n",
    "    metric_for_best_model=\"accuracy\",        # 选择最佳模型时的评价指标\n",
    "    predict_with_generate=True,              # 预测时是否使用生成器\n",
    "    fp16=True,                               # 是否使用16位浮点数精度\n",
    "    seed=42,                                 # 随机种子\n",
    "    logging_dir='./logs',                    # 日志保存路径\n",
    "    logging_steps=500,                       # 每多少步记录一次日志\n",
    "    dataloader_num_workers=4,                # 数据加载器使用的工作进程数量\n",
    "    gradient_accumulation_steps=4,           # 梯度累积步数，适用于大批次训练\n",
    "    lr_scheduler_type='polynomial',          # 学习率调度策略\n",
    "    warmup_steps=1000,                       # 学习率预热步数\n",
    "    report_to=\"none\",                        # 禁用报告工具（如TensorBoard、WandB）\n",
    "    group_by_length=True,                    # 根据样本长度分组，以提高效率\n",
    ")\n",
    "\n",
    "print(training_args)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n"
     ]
    }
   ],
   "source": [
    "# Initialize our Trainer\n",
    "do_train = True\n",
    "do_eval = False\n",
    "trainer = Seq2SeqTrainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset = train_dataset if do_train else None,\n",
    "    eval_dataset=eval_dataset if do_eval else None,\n",
    "    tokenizer=tokenizer,\n",
    "    data_collator=data_collator,\n",
    "    # if training_args.predict_with_generate else None,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/root/MyCode/Chinese_Seq2Edit/MuCGEC/models'"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pwd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training\n",
    "train_result = trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer.save_model()  # Saves the tokenizer too for easy upload\n",
    "\n",
    "metrics = train_result.metrics\n",
    "max_train_samples = (\n",
    "    data_args.max_train_samples\n",
    "    if data_args.max_train_samples is not None\n",
    "    else len(train_dataset)\n",
    ")\n",
    "metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n",
    "\n",
    "trainer.log_metrics(\"train\", metrics)\n",
    "trainer.save_metrics(\"train\", metrics)\n",
    "trainer.save_state()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluation Phase"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\" \n",
    "if training_args.do_eval:\n",
    "    max_target_length = data_args.val_max_target_length\n",
    "    if \"validation\" not in datasets:\n",
    "        raise ValueError(\"--do_eval requires a validation dataset\")\n",
    "    eval_dataset = datasets[\"validation\"]\n",
    "    if data_args.max_val_samples is not None:\n",
    "        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))\n",
    "    eval_dataset = eval_dataset.map(\n",
    "        preprocess_function,\n",
    "        batched=True,\n",
    "        num_proc=data_args.preprocessing_num_workers,\n",
    "        remove_columns=column_names,\n",
    "        load_from_cache_file=not data_args.overwrite_cache,\n",
    "    )\n",
    "    max_eval_num=30000\n",
    "    if len(eval_dataset)>max_eval_num:\n",
    "        eval_dataset=Dataset.from_dict(eval_dataset[:max_eval_num])\n",
    "    print(len(eval_dataset)) \"\"\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Inference Phase"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "' \\nif training_args.do_predict:\\n    max_target_length = data_args.val_max_target_length\\n    if \"test\" not in datasets:\\n        raise ValueError(\"--do_predict requires a test dataset\")\\n    test_dataset = datasets[\"test\"]\\n    if data_args.max_test_samples is not None:\\n        test_dataset = test_dataset.select(range(data_args.max_test_samples))\\n    test_dataset = test_dataset.map(\\n        preprocess_function,\\n        batched=True,\\n        batch_size=32,\\n        num_proc=data_args.preprocessing_num_workers,\\n        remove_columns=column_names,\\n        load_from_cache_file=not data_args.overwrite_cache,\\n    )\\n\\n\\nif training_args.do_predict:\\n    if training_args.predict_with_generate:\\n        predictions, labels, metrics = trainer.predict(test_dataset, metric_key_prefix=\"predict\")\\n        test_preds = tokenizer.batch_decode(\\n            predictions, skip_special_tokens=True,\\n        )\\n        test_preds = [\"\".join(pred.strip().split()) for pred in test_preds]\\n        output_test_preds_file = args.predict_file\\n        with open(output_test_preds_file, \"w\",encoding=\\'UTF-8\\') as writer:\\n            writer.write(\"\\n\".join(test_preds))\\n '"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inference-phase configuration for the fine-tuned BART GEC model.\n",
    "MODEL_PATH = \"./exps/seq2seq_lang8\"\n",
    "# Fix: the original bash-style \"$MODEL_PATH/results\" is a literal string in\n",
    "# Python (no interpolation happens); f-strings build the intended paths.\n",
    "RESULT_DIR = f\"{MODEL_PATH}/results\"\n",
    "# mkdir -p $RESULT_DIR\n",
    "INPUT_FILE = \"../../data/test_data/MuCGEC/MuCGEC-ALL/MuCGEC_ALL_Test.input\"  # input file (no pre-tokenization required)\n",
    "OUTPUT_FILE = f\"{RESULT_DIR}/MuCGEC_test.output\"  # output file\n",
    "\n",
    "# echo \"Generating...\"\n",
    "SECONDS = 0  # leftover timer variable from the original bash script; unused in Python\n",
    "\n",
    "# Disabled shell invocation kept for reference (the triple-quoted string is a no-op).\n",
    "\"\"\" CUDA_VISIBLE_DEVICES=0 python -u predict.py \\\n",
    "    --model_path $MODEL_PATH \\\n",
    "    --input_path $INPUT_FILE \\\n",
    "    --output_path $OUTPUT_FILE ;\n",
    " \"\"\"\n",
    "#echo \"Generating Finish!\"\n",
    "#duration=$SECONDS\n",
    "# echo \"$(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed.\"\n",
    "\n",
    "# Disabled CLI-argument parsing kept for the standalone predict.py script.\n",
    "\"\"\" parser = argparse.ArgumentParser()\n",
    "parser.add_argument('-m', '--model_path')\n",
    "parser.add_argument('-i', '--input_path')\n",
    "parser.add_argument('-o', '--output_path')\n",
    "parser.add_argument('-b', '--batch_size', default=50)\n",
    "args = parser.parse_args() \"\"\"\n",
    "# Disabled model-loading snippet (tokenizer + BART, fp16 on GPU) kept for reference.\n",
    "\"\"\" \n",
    "cc = OpenCC(\"t2s\")\n",
    "tokenizer=BertTokenizer.from_pretrained(args.model_path)\n",
    "model=BartForConditionalGeneration.from_pretrained(args.model_path)\n",
    "model.eval()\n",
    "model.half()\n",
    "model.cuda()\n",
    " \"\"\"\n",
    "# Disabled Trainer-based prediction loop kept for reference.\n",
    "\"\"\" \n",
    "if training_args.do_predict:\n",
    "    max_target_length = data_args.val_max_target_length\n",
    "    if \"test\" not in datasets:\n",
    "        raise ValueError(\"--do_predict requires a test dataset\")\n",
    "    test_dataset = datasets[\"test\"]\n",
    "    if data_args.max_test_samples is not None:\n",
    "        test_dataset = test_dataset.select(range(data_args.max_test_samples))\n",
    "    test_dataset = test_dataset.map(\n",
    "        preprocess_function,\n",
    "        batched=True,\n",
    "        batch_size=32,\n",
    "        num_proc=data_args.preprocessing_num_workers,\n",
    "        remove_columns=column_names,\n",
    "        load_from_cache_file=not data_args.overwrite_cache,\n",
    "    )\n",
    "\n",
    "\n",
    "if training_args.do_predict:\n",
    "    if training_args.predict_with_generate:\n",
    "        predictions, labels, metrics = trainer.predict(test_dataset, metric_key_prefix=\"predict\")\n",
    "        test_preds = tokenizer.batch_decode(\n",
    "            predictions, skip_special_tokens=True,\n",
    "        )\n",
    "        test_preds = [\"\".join(pred.strip().split()) for pred in test_preds]\n",
    "        output_test_preds_file = args.predict_file\n",
    "        with open(output_test_preds_file, \"w\",encoding='UTF-8') as writer:\n",
    "            writer.write(\"\\n\".join(test_preds))\n",
    " \"\"\"\n",
    "# predict()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
