{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8\n",
    "import argparse\n",
    "import logging\n",
    "import os\n",
    "import time\n",
    "from random import seed\n",
    "import torch\n",
    "from allennlp.data import allennlp_collate\n",
    "from allennlp.data.dataloader import PyTorchDataLoader\n",
    "from allennlp.data.samplers import BucketBatchSampler\n",
    "from allennlp.data.token_indexers import PretrainedTransformerIndexer\n",
    "from allennlp.modules import Embedding\n",
    "from allennlp.modules.token_embedders import PretrainedTransformerEmbedder\n",
    "from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN\n",
    "from allennlp.data.vocabulary import Vocabulary\n",
    "from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder\n",
    "from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset\n",
    "from allennlp.training import GradientDescentTrainer\n",
    "from allennlp.training.learning_rate_schedulers import ReduceOnPlateauLearningRateScheduler\n",
    "from torch.utils.data import DataLoader\n",
    "from allennlp.training.optimizers import AdamOptimizer\n",
    "from gector.datareader import Seq2LabelsDatasetReader\n",
    "from gector.seq2labels_model import Seq2Labels\n",
    "from allennlp.training.tensorboard_writer import TensorboardWriter\n",
    "\n",
    "def fix_seed(s):\n",
    "    \"\"\"\n",
    "    固定随机种子\n",
    "    \"\"\"\n",
    "    torch.manual_seed(s)\n",
    "    seed(s)\n",
    "\n",
    "def get_token_indexers(model_name):\n",
    "    \"\"\"\n",
    "    获取token编号器（主要是不同预训练BERT模型的子词算法不同，因而index策略也不同）\n",
    "    :param model_name: 模型名称\n",
    "    :return: 返回token编号器\n",
    "    \"\"\"\n",
    "    bert_token_indexer = PretrainedTransformerIndexer(model_name=model_name, namespace=\"bert\")\n",
    "    return {'bert': bert_token_indexer}\n",
    "\n",
    "def get_token_embedders(model_name, tune_bert=False):\n",
    "    \"\"\"\n",
    "    获取token嵌入器\n",
    "    :param model_name: 模型名称\n",
    "    :param tune_bert: 是否微调\n",
    "    :return: token文本域嵌入器\n",
    "    \"\"\"\n",
    "    take_grads = True if tune_bert > 0 else False\n",
    "    bert_token_emb = PretrainedTransformerEmbedder(model_name=model_name, last_layer_only=True,\n",
    "                                                   train_parameters=take_grads)\n",
    "    token_embedders = {'bert': bert_token_emb}\n",
    "\n",
    "    text_filed_emd = BasicTextFieldEmbedder(token_embedders=token_embedders)\n",
    "    return text_filed_emd\n",
    "\n",
    "def build_data_loaders(\n",
    "        data_set: AllennlpDataset,\n",
    "        batch_size: int,\n",
    "        num_workers: int,\n",
    "        shuffle: bool,\n",
    "        batches_per_epoch = None\n",
    "):\n",
    "    \"\"\"\n",
    "    创建数据载入器\n",
    "    :param batches_per_epoch:\n",
    "    :param data_set: 数据集对象\n",
    "    :param batch_size: batch大小\n",
    "    :param num_workers: 同时使用多少个线程载入数据\n",
    "    :param shuffle: 是否打乱训练集\n",
    "    :return: 训练集、开发集、测试集数据载入器\n",
    "    \"\"\"\n",
    "    return PyTorchDataLoader(data_set, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle,\n",
    "                      collate_fn=allennlp_collate, batches_per_epoch=batches_per_epoch)\n",
    "\n",
    "def get_data_reader(model_name, max_len, skip_correct=False, skip_complex=0,\n",
    "                    test_mode=False, tag_strategy=\"keep_one\",\n",
    "                    broken_dot_strategy=\"keep\",\n",
    "                    tn_prob=0, tp_prob=1, ):\n",
    "    token_indexers = get_token_indexers(model_name)\n",
    "    reader = Seq2LabelsDatasetReader(token_indexers=token_indexers,\n",
    "                                     max_len=max_len,\n",
    "                                     skip_correct=skip_correct,\n",
    "                                     skip_complex=skip_complex,\n",
    "                                     test_mode=test_mode,\n",
    "                                     tag_strategy=tag_strategy,\n",
    "                                     broken_dot_strategy=broken_dot_strategy,\n",
    "                                     lazy=True,\n",
    "                                     tn_prob=tn_prob,\n",
    "                                     tp_prob=tp_prob)\n",
    "    return reader\n",
    "\n",
    "\n",
     "def get_model(model_name, vocab, tune_bert=False, predictor_dropout=0,\n",
     "              label_smoothing=0.0,\n",
     "              confidence=0,\n",
     "              model_dir=\"\",\n",
     "              log=None):\n",
     "    \"\"\"Build the Seq2Labels GEC model on top of a pretrained transformer.\n",
     "\n",
     "    :param model_name: name/path of the pretrained transformer model\n",
     "    :param vocab: AllenNLP Vocabulary (with 'labels' and 'd_tags' namespaces)\n",
     "    :param tune_bert: whether the transformer weights receive gradients\n",
     "    :param predictor_dropout: dropout passed to the prediction head\n",
     "    :param label_smoothing: label-smoothing alpha passed to the model\n",
     "    :param confidence: passed through to Seq2Labels (presumably an inference\n",
     "        bias term -- confirm against the model implementation)\n",
     "    :param model_dir: checkpoint path handed to the model for saving\n",
     "    :param log: logger instance used by the model\n",
     "    :return: the constructed Seq2Labels model\n",
     "\n",
     "    NOTE(review): this function also reads the module-level ``args``\n",
     "    namespace (cuda_device, dev_set, vocab_path, weights_name, save_metric),\n",
     "    so it only works after the argparse block in ``__main__`` has run --\n",
     "    consider passing these values in explicitly.\n",
     "    \"\"\"\n",
     "    token_embs = get_token_embedders(model_name, tune_bert=tune_bert)\n",
     "    model = Seq2Labels(vocab=vocab,\n",
     "                       text_field_embedder=token_embs,\n",
     "                       predictor_dropout=predictor_dropout,\n",
     "                       label_smoothing=label_smoothing,\n",
     "                       confidence=confidence,\n",
     "                       model_dir=model_dir,\n",
     "                       cuda_device=args.cuda_device,\n",
     "                       dev_file=args.dev_set,\n",
     "                       logger=log,\n",
     "                       vocab_path=args.vocab_path,\n",
     "                       weight_name=args.weights_name,\n",
     "                       save_metric=args.save_metric\n",
     "                       )\n",
     "    return model\n",
    "\n",
    "\n",
    "def main(args):\n",
    "    fix_seed(args.seed)\n",
    "    if not os.path.exists(args.model_dir):\n",
    "        os.mkdir(args.model_dir)\n",
    "    logger = logging.getLogger(__file__)\n",
    "    logger.setLevel(level=logging.INFO)\n",
    "    start_time = time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime())\n",
    "    handler = logging.FileHandler(args.model_dir + '/logs_{:s}.txt'.format(str(start_time)))\n",
    "    handler.setLevel(logging.INFO)\n",
    "    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n",
    "    handler.setFormatter(formatter)\n",
    "    logger.addHandler(handler)\n",
    "\n",
    "    weights_name = args.weights_name\n",
    "    reader = get_data_reader(weights_name, args.max_len, skip_correct=bool(args.skip_correct),\n",
    "                             skip_complex=args.skip_complex,\n",
    "                             test_mode=False,\n",
    "                             tag_strategy=args.tag_strategy,\n",
    "                             tn_prob=args.tn_prob,\n",
    "                             tp_prob=args.tp_prob)\n",
    "    train_data = reader.read(args.train_set)\n",
    "    dev_data = reader.read(args.dev_set)\n",
    "\n",
    "    default_tokens = [DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN]\n",
    "    namespaces = ['labels', 'd_tags']\n",
    "    tokens_to_add = {x: default_tokens for x in namespaces}\n",
    "\n",
    "    # build vocab\n",
    "    if args.vocab_path:\n",
    "        vocab = Vocabulary.from_files(args.vocab_path)\n",
    "    else:\n",
    "        vocab = Vocabulary.from_instances(train_data,\n",
    "                                          min_count={\"labels\": 5},\n",
    "                                          tokens_to_add=tokens_to_add)\n",
    "        vocab.save_to_files(args.vocab_path)\n",
    "\n",
    "    print(\"Data is loaded\")\n",
    "    logger.info(\"Data is loaded\")\n",
    "\n",
    "    model = get_model(weights_name, vocab,\n",
    "                      tune_bert=args.tune_bert,\n",
    "                      predictor_dropout=args.predictor_dropout,\n",
    "                      label_smoothing=args.label_smoothing,\n",
    "                      model_dir=os.path.join(args.model_dir, args.model_name + '.th'),\n",
    "                      log=logger)\n",
    "\n",
    "    device = torch.device(\"cuda:\" + str(args.cuda_device) if int(args.cuda_device) >= 0 else \"cpu\")\n",
    "    if args.pretrain:  # 只加载部分预训练模型\n",
    "        pretrained_dict = torch.load(os.path.join(args.pretrain_folder, args.pretrain + '.th'), map_location='cpu')\n",
    "        model_dict = model.state_dict()\n",
    "        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}\n",
    "        model_dict.update(pretrained_dict)\n",
    "        model.load_state_dict(model_dict)\n",
    "        print('load pretrained model')\n",
    "        logger.info('load pretrained model')\n",
    "\n",
    "    model = model.to(device)\n",
    "    print(\"Model is set\")\n",
    "    logger.info(\"Model is set\")\n",
    "\n",
    "    parameters = [\n",
    "        (n, p)\n",
    "        for n, p in model.named_parameters() if p.requires_grad\n",
    "    ]\n",
    "    \n",
    "    # 使用Adam算法进行SGD\n",
    "    optimizer = AdamOptimizer(parameters, lr=args.lr, betas=(0.9, 0.999))\n",
    "    scheduler = ReduceOnPlateauLearningRateScheduler(optimizer)\n",
    "    train_data.index_with(vocab)\n",
    "    dev_data.index_with(vocab)\n",
    "    tensorboardWriter = TensorboardWriter(args.model_dir)\n",
    "    trainer = GradientDescentTrainer(\n",
    "        model=model,\n",
    "        data_loader=build_data_loaders(train_data, batch_size=args.batch_size, num_workers=0, shuffle=False, batches_per_epoch=args.updates_per_epoch),\n",
    "        validation_data_loader=build_data_loaders(dev_data, batch_size=args.batch_size, num_workers=0, shuffle=False),\n",
    "        num_epochs=args.n_epoch,\n",
    "        optimizer=optimizer,\n",
    "        patience=args.patience,\n",
    "        validation_metric=args.save_metric,\n",
    "        cuda_device=device,\n",
    "        num_gradient_accumulation_steps=args.accumulation_size,\n",
    "        learning_rate_scheduler=scheduler,\n",
    "        tensorboard_writer=tensorboardWriter,\n",
    "        use_amp=True  # 混合精度训练，如果显卡不支持请设为false\n",
    "    )\n",
    "    print(\"Start training\")\n",
    "    print('\\nepoch: 0')\n",
    "    logger.info(\"Start training\")\n",
    "    logger.info('epoch: 0')\n",
    "    trainer.train()\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # read parameters\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument('--train_set',\n",
    "                        help='Path to the train data',\n",
    "                        required=True)  # 训练集路径（带标签格式）\n",
    "    parser.add_argument('--dev_set',\n",
    "                        help='Path to the dev data',\n",
    "                        required=True)  # 开发集路径（带标签格式）\n",
    "    parser.add_argument('--model_dir',\n",
    "                        help='Path to the model dir',\n",
    "                        required=True)  # 模型保存路径\n",
    "    parser.add_argument('--model_name',\n",
    "                        help='The name of saved checkpoint',\n",
    "                        required=True)  # 模型名称\n",
    "    parser.add_argument('--vocab_path',\n",
    "                        help='Path to the model vocabulary directory.'\n",
    "                             'If not set then build vocab from data',\n",
    "                        default=\"./data/output_vocabulary_chinese_char_hsk+lang8_5\")  # 词表路径\n",
    "    parser.add_argument('--batch_size',\n",
    "                        type=int,\n",
    "                        help='The size of the batch.',\n",
    "                        default=256)  # batch大小（句子数目）\n",
    "    parser.add_argument('--max_len',\n",
    "                        type=int,\n",
    "                        help='The max sentence length'\n",
    "                             '(all longer will be truncated)',\n",
    "                        default=200)  # 最大输入长度，过长句子将被截断\n",
    "    parser.add_argument('--target_vocab_size',\n",
    "                        type=int,\n",
    "                        help='The size of target vocabularies.',\n",
    "                        default=1000)  # 词表规模（生成词表时才需要）\n",
    "    parser.add_argument('--n_epoch',\n",
    "                        type=int,\n",
    "                        help='The number of epoch for training model.',\n",
    "                        default=2)  # 训练轮数\n",
    "    parser.add_argument('--patience',\n",
    "                        type=int,\n",
    "                        help='The number of epoch with any improvements'\n",
    "                             ' on validation set.',\n",
    "                        default=3)  # 早停轮数\n",
    "    parser.add_argument('--skip_correct',\n",
    "                        type=int,\n",
    "                        help='If set than correct sentences will be skipped '\n",
    "                             'by data reader.',\n",
    "                        default=1)  # 是否跳过正确句子\n",
    "    parser.add_argument('--skip_complex', \n",
    "                        type=int,\n",
    "                        help='If set than complex corrections will be skipped '\n",
    "                             'by data reader.',\n",
    "                        choices=[0, 1, 2, 3, 4, 5],\n",
    "                        default=0)  # 是否跳过复杂句子\n",
    "    parser.add_argument('--tune_bert',\n",
    "                        type=int,\n",
    "                        help='If more then 0 then fine tune bert.',\n",
    "                        default=0)  # 是否微调bert\n",
    "    parser.add_argument('--tag_strategy',\n",
    "                        choices=['keep_one', 'merge_all'],\n",
    "                        help='The type of the data reader behaviour.',\n",
    "                        default='keep_one')  # 标签抽取策略，前者每个位置只保留一个标签，后者保留所有标签\n",
    "    parser.add_argument('--lr',\n",
    "                        type=float,\n",
    "                        help='Set initial learning rate.',\n",
    "                        default=1e-3)  # 初始学习率\n",
    "    parser.add_argument('--predictor_dropout',\n",
    "                        type=float,\n",
    "                        help='The value of dropout for predictor.',\n",
    "                        default=0.0)  # dropout率（除bert以外部分）\n",
    "    parser.add_argument('--label_smoothing',\n",
    "                        type=float,\n",
    "                        help='The value of parameter alpha for label smoothing.',\n",
    "                        default=0.0)  # 标签平滑\n",
    "    parser.add_argument('--tn_prob',\n",
    "                        type=float,\n",
    "                        help='The probability to take TN from data.',\n",
    "                        default=0)  # 保留正确句子的比例\n",
    "    parser.add_argument('--tp_prob', \n",
    "                        type=float,\n",
    "                        help='The probability to take TP from data.',\n",
    "                        default=1)  # 保留错误句子的比例\n",
    "    parser.add_argument('--pretrain_folder',\n",
    "                        help='The name of the pretrain folder.',\n",
    "                        default=None)  # 之前已经训练好的checkpoint的文件夹\n",
    "    parser.add_argument('--pretrain',  \n",
    "                        help='The name of the pretrain weights in pretrain_folder param.',\n",
    "                        default=None)  # 之前已经训练好的checkpoint名称\n",
    "    parser.add_argument('--cuda_device',\n",
    "                        help='The number of GPU',\n",
    "                        default=0)  # 使用GPU编号\n",
    "    parser.add_argument('--accumulation_size',\n",
    "                        type=int,\n",
    "                        help='How many batches do you want accumulate.',\n",
    "                        default=1)  # 梯度累积\n",
    "    parser.add_argument('--weights_name',\n",
    "                        type=str,\n",
    "                        default=\"chinese-struct-bert\")  # 预训练语言模型路径\n",
    "    parser.add_argument('--save_metric',\n",
    "                        type=str,\n",
    "                        choices=[\"+labels_accuracy\", \"+labels_accuracy_except_keep\"],\n",
    "                        default=\"+labels_accuracy\")  # 模型保存指标\n",
    "    parser.add_argument('--updates_per_epoch',\n",
    "                        type=int,\n",
    "                        default=None)  # 每个epoch更新次数\n",
    "    parser.add_argument('--seed',\n",
    "                        type=int,\n",
    "                        default=1)  # 随机种子\n",
    "    args = parser.parse_args()\n",
    "    main(args)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Tokenization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# coding=utf-8\n",
    "# Copyright 2018 The Google AI Language Team Authors.\n",
    "#\n",
    "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "# you may not use this file except in compliance with the License.\n",
    "# You may obtain a copy of the License at\n",
    "#\n",
    "#     http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "# Unless required by applicable law or agreed to in writing, software\n",
    "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "# See the License for the specific language governing permissions and\n",
    "# limitations under the License.\n",
    "\"\"\"Tokenization classes.\"\"\"\n",
    "\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import collections\n",
    "import unicodedata\n",
    "import six\n",
    "\n",
    "\n",
    "def convert_to_unicode(text):\n",
    "  \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n",
    "  if six.PY3:\n",
    "    if isinstance(text, str):\n",
    "      return text\n",
    "    elif isinstance(text, bytes):\n",
    "      return text.decode(\"utf-8\", \"ignore\")\n",
    "    else:\n",
    "      raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n",
    "  elif six.PY2:\n",
    "    if isinstance(text, str):\n",
    "      return text.decode(\"utf-8\", \"ignore\")\n",
    "    elif isinstance(text, unicode):\n",
    "      return text\n",
    "    else:\n",
    "      raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n",
    "  else:\n",
    "    raise ValueError(\"Not running on Python2 or Python 3?\")\n",
    "\n",
    "\n",
    "def printable_text(text):\n",
    "  \"\"\"Returns text encoded in a way suitable for print or `tf.logging`.\"\"\"\n",
    "\n",
    "  # These functions want `str` for both Python2 and Python3, but in one case\n",
    "  # it's a Unicode string and in the other it's a byte string.\n",
    "  if six.PY3:\n",
    "    if isinstance(text, str):\n",
    "      return text\n",
    "    elif isinstance(text, bytes):\n",
    "      return text.decode(\"utf-8\", \"ignore\")\n",
    "    else:\n",
    "      raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n",
    "  elif six.PY2:\n",
    "    if isinstance(text, str):\n",
    "      return text\n",
    "    elif isinstance(text, unicode):\n",
    "      return text.encode(\"utf-8\")\n",
    "    else:\n",
    "      raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n",
    "  else:\n",
    "    raise ValueError(\"Not running on Python2 or Python 3?\")\n",
    "\n",
    "\n",
    "def load_vocab(vocab_file):\n",
    "  \"\"\"Loads a vocabulary file into a dictionary.\"\"\"\n",
    "  vocab = collections.OrderedDict()\n",
    "  index = 0\n",
    "  with open(vocab_file, \"r\") as reader:\n",
    "    while True:\n",
    "      token = convert_to_unicode(reader.readline())\n",
    "      if not token:\n",
    "        break\n",
    "      token = token.strip()\n",
    "      vocab[token] = index\n",
    "      index += 1\n",
    "  return vocab\n",
    "\n",
    "\n",
    "def convert_by_vocab(vocab, items):\n",
    "  \"\"\"Converts a sequence of [tokens|ids] using the vocab.\"\"\"\n",
    "  output = []\n",
    "  for item in items:\n",
    "    if item not in vocab:\n",
    "      print(\"warning: %s not in vocab\" % item)\n",
    "      item = \"[UNK]\"\n",
    "    output.append(vocab[item])\n",
    "  return output\n",
    "\n",
    "\n",
     "def convert_tokens_to_ids(vocab, tokens):\n",
     "  \"\"\"Converts a sequence of tokens to ids (thin wrapper over convert_by_vocab).\"\"\"\n",
     "  return convert_by_vocab(vocab, tokens)\n",
    "\n",
    "\n",
     "def convert_ids_to_tokens(inv_vocab, ids):\n",
     "  \"\"\"Converts a sequence of ids to tokens via the inverse vocab (thin wrapper over convert_by_vocab).\"\"\"\n",
     "  return convert_by_vocab(inv_vocab, ids)\n",
    "\n",
    "\n",
    "def whitespace_tokenize(text):\n",
    "  \"\"\"Runs basic whitespace cleaning and splitting on a peice of text.\"\"\"\n",
    "  text = text.strip()\n",
    "  if not text:\n",
    "    return []\n",
    "  tokens = text.split()\n",
    "  return tokens\n",
    "\n",
    "\n",
    "class FullTokenizer(object):\n",
    "  \"\"\"Runs end-to-end tokenziation.\"\"\"\n",
    "\n",
    "  def __init__(self, vocab_file, do_lower_case=True):\n",
    "    self.vocab = load_vocab(vocab_file)\n",
    "    self.inv_vocab = {v: k for k, v in self.vocab.items()}\n",
    "    self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n",
    "    self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)\n",
    "\n",
    "  def tokenize(self, text):\n",
    "    split_tokens = []\n",
    "    for token in self.basic_tokenizer.tokenize(text):\n",
    "      for sub_token in self.wordpiece_tokenizer.tokenize(token):\n",
    "        split_tokens.append(sub_token)\n",
    "\n",
    "    return split_tokens\n",
    "\n",
    "  def convert_tokens_to_ids(self, tokens):\n",
    "    return convert_by_vocab(self.vocab, tokens)\n",
    "\n",
    "  def convert_ids_to_tokens(self, ids):\n",
    "    return convert_by_vocab(self.inv_vocab, ids)\n",
    "\n",
    "\n",
     "class BasicTokenizer(object):\n",
     "  \"\"\"Runs basic tokenization (punctuation splitting, lower casing, etc.).\"\"\"\n",
     "\n",
     "  def __init__(self, do_lower_case=True):\n",
     "    \"\"\"Constructs a BasicTokenizer.\n",
     "    Args:\n",
     "      do_lower_case: Whether to lower case the input.\n",
     "    \"\"\"\n",
     "    self.do_lower_case = do_lower_case\n",
     "\n",
     "  def tokenize(self, text):\n",
     "    \"\"\"Tokenizes a piece of text.\n",
     "\n",
     "    Pipeline: drop control chars -> isolate CJK chars with spaces ->\n",
     "    whitespace split -> optional lower-casing / accent stripping ->\n",
     "    punctuation split.\n",
     "\n",
     "    Args:\n",
     "      text: str or utf-8 encoded bytes.\n",
     "    Returns:\n",
     "      A list of token strings.\n",
     "    \"\"\"\n",
     "    text = convert_to_unicode(text)\n",
     "    text = self._clean_text(text)\n",
     "\n",
     "    # This was added on November 1st, 2018 for the multilingual and Chinese\n",
     "    # models. This is also applied to the English models now, but it doesn't\n",
     "    # matter since the English models were not trained on any Chinese data\n",
     "    # and generally don't have any Chinese data in them (there are Chinese\n",
     "    # characters in the vocabulary because Wikipedia does have some Chinese\n",
     "    # words in the English Wikipedia.).\n",
     "    text = self._tokenize_chinese_chars(text)\n",
     "\n",
     "    orig_tokens = whitespace_tokenize(text)\n",
     "    split_tokens = []\n",
     "    for token in orig_tokens:\n",
     "      if self.do_lower_case:\n",
     "        token = token.lower()\n",
     "        token = self._run_strip_accents(token)\n",
     "      split_tokens.extend(self._run_split_on_punc(token))\n",
     "\n",
     "    output_tokens = whitespace_tokenize(\" \".join(split_tokens))\n",
     "    return output_tokens\n",
     "\n",
     "  def _run_strip_accents(self, text):\n",
     "    \"\"\"Strips accents from a piece of text.\"\"\"\n",
     "    # NFD decomposition separates base characters from combining marks,\n",
     "    # which then appear with Unicode category 'Mn' and are dropped.\n",
     "    text = unicodedata.normalize(\"NFD\", text)\n",
     "    output = []\n",
     "    for char in text:\n",
     "      cat = unicodedata.category(char)\n",
     "      if cat == \"Mn\":\n",
     "        continue\n",
     "      output.append(char)\n",
     "    return \"\".join(output)\n",
     "\n",
     "  def _run_split_on_punc(self, text):\n",
     "    \"\"\"Splits punctuation on a piece of text.\"\"\"\n",
     "    # Each punctuation char becomes its own one-char token; runs of\n",
     "    # non-punctuation chars are accumulated into the current token.\n",
     "    chars = list(text)\n",
     "    i = 0\n",
     "    start_new_word = True\n",
     "    output = []\n",
     "    while i < len(chars):\n",
     "      char = chars[i]\n",
     "      if _is_punctuation(char):\n",
     "        output.append([char])\n",
     "        start_new_word = True\n",
     "      else:\n",
     "        if start_new_word:\n",
     "          output.append([])\n",
     "        start_new_word = False\n",
     "        output[-1].append(char)\n",
     "      i += 1\n",
     "\n",
     "    return [\"\".join(x) for x in output]\n",
     "\n",
     "  def _tokenize_chinese_chars(self, text):\n",
     "    \"\"\"Adds whitespace around any CJK character.\"\"\"\n",
     "    output = []\n",
     "    for char in text:\n",
     "      cp = ord(char)\n",
     "      if self._is_chinese_char(cp):\n",
     "        output.append(\" \")\n",
     "        output.append(char)\n",
     "        output.append(\" \")\n",
     "      else:\n",
     "        output.append(char)\n",
     "    return \"\".join(output)\n",
     "\n",
     "  def _is_chinese_char(self, cp):\n",
     "    \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n",
     "    # This defines a \"chinese character\" as anything in the CJK Unicode block:\n",
     "    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n",
     "    #\n",
     "    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n",
     "    # despite its name. The modern Korean Hangul alphabet is a different block,\n",
     "    # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n",
     "    # space-separated words, so they are not treated specially and handled\n",
     "    # like the all of the other languages.\n",
     "    if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #\n",
     "        (cp >= 0x3400 and cp <= 0x4DBF) or  #\n",
     "        (cp >= 0x20000 and cp <= 0x2A6DF) or  #\n",
     "        (cp >= 0x2A700 and cp <= 0x2B73F) or  #\n",
     "        (cp >= 0x2B740 and cp <= 0x2B81F) or  #\n",
     "        (cp >= 0x2B820 and cp <= 0x2CEAF) or\n",
     "        (cp >= 0xF900 and cp <= 0xFAFF) or  #\n",
     "        (cp >= 0x2F800 and cp <= 0x2FA1F)):  #\n",
     "      return True\n",
     "\n",
     "    return False\n",
     "\n",
     "  def _clean_text(self, text):\n",
     "    \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n",
     "    # Drops NUL and U+FFFD (the replacement character) plus control chars,\n",
     "    # and canonicalizes every whitespace char to a single space.\n",
     "    output = []\n",
     "    for char in text:\n",
     "      cp = ord(char)\n",
     "      if cp == 0 or cp == 0xfffd or _is_control(char):\n",
     "        continue\n",
     "      if _is_whitespace(char):\n",
     "        output.append(\" \")\n",
     "      else:\n",
     "        output.append(char)\n",
     "    return \"\".join(output)\n",
    "\n",
    "\n",
     "class WordpieceTokenizer(object):\n",
     "  \"\"\"Runs WordPiece tokenziation.\"\"\"\n",
     "\n",
     "  def __init__(self, vocab, unk_token=\"[UNK]\", max_input_chars_per_word=100):\n",
     "    # vocab: mapping token -> id; only membership tests are used below.\n",
     "    self.vocab = vocab\n",
     "    # unk_token is only used for over-long words here; see the note in\n",
     "    # tokenize() about unsegmentable tokens.\n",
     "    self.unk_token = unk_token\n",
     "    self.max_input_chars_per_word = max_input_chars_per_word\n",
     "\n",
     "  def tokenize(self, text):\n",
     "    \"\"\"Tokenizes a piece of text into its word pieces.\n",
     "    This uses a greedy longest-match-first algorithm to perform tokenization\n",
     "    using the given vocabulary.\n",
     "    For example:\n",
     "      input = \"unaffable\"\n",
     "      output = [\"un\", \"##aff\", \"##able\"]\n",
     "    Args:\n",
     "      text: A single token or whitespace separated tokens. This should have\n",
     "        already been passed through `BasicTokenizer.\n",
     "    Returns:\n",
     "      A list of wordpiece tokens.\n",
     "\n",
     "    Note: unlike Google's original implementation, tokens that cannot be\n",
     "    fully segmented are returned unchanged rather than replaced with the\n",
     "    UNK token -- a deliberate local modification (see the is_bad branch).\n",
     "    \"\"\"\n",
     "\n",
     "    text = convert_to_unicode(text)\n",
     "\n",
     "    output_tokens = []\n",
     "    for token in whitespace_tokenize(text):\n",
     "      chars = list(token)\n",
     "      if len(chars) > self.max_input_chars_per_word:\n",
     "        output_tokens.append(self.unk_token)\n",
     "        continue\n",
     "\n",
     "      is_bad = False\n",
     "      start = 0\n",
     "      sub_tokens = []\n",
     "      while start < len(chars):\n",
     "        # Greedy longest-match: try the longest substring starting at\n",
     "        # `start`, shrinking from the right until a vocab entry is found.\n",
     "        end = len(chars)\n",
     "        cur_substr = None\n",
     "        while start < end:\n",
     "          substr = \"\".join(chars[start:end])\n",
     "          if start > 0:\n",
     "            substr = \"##\" + substr\n",
     "          if substr in self.vocab:\n",
     "            cur_substr = substr\n",
     "            break\n",
     "          end -= 1\n",
     "        if cur_substr is None:\n",
     "          is_bad = True\n",
     "          break\n",
     "        sub_tokens.append(cur_substr)\n",
     "        start = end\n",
     "\n",
     "      if is_bad:\n",
     "        # output_tokens.append(self.unk_token)\n",
     "        output_tokens.append(token)  # keep the UNK token\n",
     "      else:\n",
     "        output_tokens.extend(sub_tokens)\n",
     "    return output_tokens\n",
    "\n",
    "\n",
    "def _is_whitespace(char):\n",
    "  \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n",
    "  # \\t, \\n, and \\r are technically contorl characters but we treat them\n",
    "  # as whitespace since they are generally considered as such.\n",
    "  if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n",
    "    return True\n",
    "  cat = unicodedata.category(char)\n",
    "  if cat == \"Zs\":\n",
    "    return True\n",
    "  return False\n",
    "\n",
    "\n",
    "def _is_control(char):\n",
    "  \"\"\"Checks whether `chars` is a control character.\"\"\"\n",
    "  # These are technically control characters but we count them as whitespace\n",
    "  # characters.\n",
    "  if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n",
    "    return False\n",
    "  cat = unicodedata.category(char)\n",
    "  if cat.startswith(\"C\"):\n",
    "    return True\n",
    "  return False\n",
    "\n",
    "\n",
    "def _is_punctuation(char):\n",
    "  \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n",
    "  cp = ord(char)\n",
    "  # We treat all non-letter/number ASCII as punctuation.\n",
    "  # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n",
    "  # Punctuation class but we treat them as punctuation anyways, for\n",
    "  # consistency.\n",
    "  if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n",
    "      (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n",
    "    return True\n",
    "  cat = unicodedata.category(char)\n",
    "  if cat.startswith(\"P\"):\n",
    "    return True\n",
    "  return False"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8\n",
    "import os\n",
    "from transformers import BertModel\n",
    "import torch\n",
    "import tokenization\n",
    "import argparse\n",
    "from gector.gec_model import GecBERTModel\n",
    "import re\n",
    "from opencc import OpenCC\n",
    "\n",
    "cc = OpenCC(\"t2s\")\n",
    "\n",
    "def split_sentence(document: str, flag: str = \"all\", limit: int = 510):\n",
    "    \"\"\"\n",
    "    Args:\n",
    "        document:\n",
    "        flag: Type:str, \"all\" 中英文标点分句，\"zh\" 中文标点分句，\"en\" 英文标点分句\n",
    "        limit: 默认单句最大长度为510个字符\n",
    "    Returns: Type:list\n",
    "    \"\"\"\n",
    "    sent_list = []\n",
    "    try:\n",
    "        if flag == \"zh\":\n",
    "            document = re.sub('(?P<quotation_mark>([。？！](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>([。？！])[”’\"\\'])', r'\\g<quotation_mark>\\n', document)  # 特殊引号\n",
    "        elif flag == \"en\":\n",
    "            document = re.sub('(?P<quotation_mark>([.?!](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 英文单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>([?!.][\"\\']))', r'\\g<quotation_mark>\\n', document)  # 特殊引号\n",
    "        else:\n",
    "            document = re.sub('(?P<quotation_mark>([。？！….?!](?![”’\"\\'])))', r'\\g<quotation_mark>\\n', document)  # 单字符断句符\n",
    "            document = re.sub('(?P<quotation_mark>(([。？！.!?]|…{1,2})[”’\"\\']))', r'\\g<quotation_mark>\\n',\n",
    "                              document)  # 特殊引号\n",
    "\n",
    "        sent_list_ori = document.splitlines()\n",
    "        for sent in sent_list_ori:\n",
    "            sent = sent.strip()\n",
    "            if not sent:\n",
    "                continue\n",
    "            else:\n",
    "                while len(sent) > limit:\n",
    "                    temp = sent[0:limit]\n",
    "                    sent_list.append(temp)\n",
    "                    sent = sent[limit:]\n",
    "                sent_list.append(sent)\n",
    "    except:\n",
    "        sent_list.clear()\n",
    "        sent_list.append(document)\n",
    "    return sent_list\n",
    "\n",
    "\n",
    "def predict_for_file(input_file, output_file, model, batch_size, log=True, seg=False):\n",
    "    with open(input_file, 'r', encoding='utf-8') as f:\n",
    "        lines = f.readlines()\n",
    "    sents = [s.strip() for s in lines]\n",
    "    subsents = []\n",
    "    s_map = []\n",
    "    for i, sent in enumerate(sents):  # 将篇章划分为子句，分句预测再合并\n",
    "        if seg:\n",
    "            subsent_list = split_sentence(sent, flag=\"zh\")\n",
    "        else:\n",
    "            subsent_list = [sent]\n",
    "        s_map.extend([i for _ in range(len(subsent_list))])\n",
    "        subsents.extend(subsent_list)\n",
    "    assert len(subsents) == len(s_map)\n",
    "    predictions = []\n",
    "    cnt_corrections = 0\n",
    "    batch = []\n",
    "    for sent in subsents:\n",
    "        batch.append(sent.split())\n",
    "        if len(batch) == batch_size:  # 如果数据够了一个batch的话，\n",
    "            preds, cnt = model.handle_batch(batch)\n",
    "            assert len(preds) == len(batch)\n",
    "            predictions.extend(preds)\n",
    "            cnt_corrections += cnt\n",
    "            if log:\n",
    "                for z in zip(batch, preds):\n",
    "                    print(\"source： \" + \"\".join(z[0]))\n",
    "                    print(\"target： \" + \"\".join(z[1]))\n",
    "                    print()\n",
    "            batch = []\n",
    "\n",
    "    if batch:\n",
    "        preds, cnt = model.handle_batch(batch)\n",
    "        assert len(preds) == len(batch)\n",
    "        predictions.extend(preds)\n",
    "        cnt_corrections += cnt\n",
    "        if log:\n",
    "            for z in zip(batch, preds):\n",
    "                print(\"source： \" + \"\".join(z[0]))\n",
    "                print(\"target： \" + \"\".join(z[1]))\n",
    "                print()\n",
    "\n",
    "    assert len(subsents) == len(predictions)\n",
    "    if output_file:\n",
    "        with open(output_file, 'w') as f1:\n",
    "            with open(output_file + \".char\", 'w') as f2:\n",
    "                results = [\"\" for _ in range(len(sents))]\n",
    "                for i, ret in enumerate(predictions):\n",
    "                    ret_new = [tok.lstrip(\"##\") for tok in ret]\n",
    "                    ret = cc.convert(\"\".join(ret_new))\n",
    "                    results[s_map[i]] += cc.convert(ret)\n",
    "                tokenizer = tokenization.FullTokenizer(vocab_file=\"vocab.txt\", do_lower_case=False)\n",
    "                for ret in results:\n",
    "                    f1.write(ret + \"\\n\")\n",
    "                    line = tokenization.convert_to_unicode(ret)\n",
    "                    tokens = tokenizer.tokenize(line)\n",
    "                    f2.write(\" \".join(tokens) + \"\\n\")\n",
    "    return cnt_corrections\n",
    "\n",
    "\n",
    "def main(args):\n",
    "    \"\"\"Build the GECToR model from parsed CLI args and run file prediction.\"\"\"\n",
    "    # get all paths\n",
    "    model = GecBERTModel(vocab_path=args.vocab_path,\n",
    "                         model_paths=args.model_path.split(','),  # comma-separated for ensembles\n",
    "                         weights_names=args.weights_name.split(','),  # one PLM per ensemble member\n",
    "                         max_len=args.max_len, min_len=args.min_len,\n",
    "                         iterations=args.iteration_count,\n",
    "                         min_error_probability=args.min_error_probability,\n",
    "                         # NOTE(review): min_probability is fed from\n",
    "                         # args.min_error_probability, so the parsed\n",
    "                         # --min_probability flag is never used — confirm\n",
    "                         # this is intentional.\n",
    "                         min_probability=args.min_error_probability,\n",
    "                         log=False,\n",
    "                         confidence=args.additional_confidence,\n",
    "                         is_ensemble=args.is_ensemble,\n",
    "                         weigths=args.weights,  # (sic) spelling matches the GecBERTModel parameter\n",
    "                         cuda_device=args.cuda_device\n",
    "                         )\n",
    "    cnt_corrections = predict_for_file(args.input_file, args.output_file, model,\n",
    "                                                   batch_size=args.batch_size, log=args.log, seg=args.seg)\n",
    "    print(f\"Produced overall corrections: {cnt_corrections}\")\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # read parameters\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument('--model_path',\n",
    "                        help='Path to the model file',\n",
    "                        required=True)  # GECToR model file(s); comma-separated for ensembles\n",
    "    parser.add_argument('--weights_name',\n",
    "                        help='Path to the pre-trained language model',\n",
    "                        default='chinese-struct-bert',\n",
    "                        required=True)  # one PLM per ensemble member, comma-separated (NOTE(review): default is dead code because required=True)\n",
    "    parser.add_argument('--vocab_path',\n",
    "                        help='Path to the vocab file',\n",
    "                        default='./data/output_vocabulary_chinese_char_hsk+lang8_5',\n",
    "                        )  # label vocabulary directory\n",
    "    parser.add_argument('--input_file',\n",
    "                        help='Path to the input file',\n",
    "                        required=True)  # input file; must be pre-tokenized into words/chars\n",
    "    parser.add_argument('--output_file',\n",
    "                        help='Path to the output file',\n",
    "                        required=True)  # output result file\n",
    "    parser.add_argument('--max_len',\n",
    "                        type=int,\n",
    "                        help='The max sentence length'\n",
    "                             '(all longer will be truncated)',\n",
    "                        default=200)  # max input length in tokens; longer inputs are truncated\n",
    "    parser.add_argument('--min_len',\n",
    "                        type=int,\n",
    "                        help='The minimum sentence length'\n",
    "                             '(all longer will be returned w/o changes)',\n",
    "                        default=0)  # minimum length in tokens; shorter inputs are left unmodified\n",
    "    parser.add_argument('--batch_size',\n",
    "                        type=int,\n",
    "                        help='The number of sentences in a batch when predicting',\n",
    "                        default=128)  # prediction batch size (number of sentences)\n",
    "    parser.add_argument('--iteration_count',\n",
    "                        type=int,\n",
    "                        help='The number of iterations of the model',\n",
    "                        default=5)  # number of iterative correction rounds\n",
    "    parser.add_argument('--additional_confidence',\n",
    "                        type=float,\n",
    "                        help='How many probability to add to $KEEP token',\n",
    "                        default=0.0)  # extra confidence added to the $KEEP label\n",
    "    parser.add_argument('--min_probability',\n",
    "                        type=float,\n",
    "                        default=0)  # token-level minimum probability threshold for applying an edit\n",
    "    parser.add_argument('--min_error_probability',\n",
    "                        type=float,\n",
    "                        default=0.0)  # sentence-level minimum probability threshold for applying edits\n",
    "    parser.add_argument('--is_ensemble', \n",
    "                        type=int,\n",
    "                        help='Whether to do ensembling.',\n",
    "                        default=0)  # whether to ensemble several models\n",
    "    parser.add_argument('--weights',\n",
    "                        help='Used to calculate weighted average', nargs='+',\n",
    "                        default=None)  # per-model weights for a weighted ensemble\n",
    "    parser.add_argument('--cuda_device',  \n",
    "                        help='The number of GPU',\n",
    "                        default=0)  # GPU device index\n",
    "    parser.add_argument('--log',  \n",
    "                        action='store_true')  # whether to print full source/target pairs\n",
    "    parser.add_argument('--seg',  \n",
    "                        action='store_true')  # whether to split long sentences, predict, then merge\n",
    "    args = parser.parse_args()\n",
    "    main(args)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Shell Pipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
    "# Step1. Data Preprocessing\n",
    "\n",
    "## Download Structbert\n",
    "if [ ! -f ./plm/chinese-struct-bert-large/pytorch_model.bin ]; then\n",
    "    wget https://alice-open.oss-cn-zhangjiakou.aliyuncs.com/StructBERT/ch_model\n",
    "    mv ch_model ./plm/chinese-struct-bert-large/pytorch_model.bin\n",
    "fi\n",
    "\n",
    "## Tokenize\n",
    "SRC_FILE=../../data/train_data/lang8+hsk/train.src  # one erroneous sentence per line\n",
    "TGT_FILE=../../data/train_data/lang8+hsk/train.tgt  # one corrected sentence per line, aligned with SRC_FILE\n",
    "if [ ! -f $SRC_FILE\".char\" ]; then\n",
    "    python ../../tools/segment/segment_bert.py < $SRC_FILE > $SRC_FILE\".char\"  # split into characters\n",
    "fi\n",
    "if [ ! -f $TGT_FILE\".char\" ]; then\n",
    "    python ../../tools/segment/segment_bert.py < $TGT_FILE > $TGT_FILE\".char\"  # split into characters\n",
    "fi\n",
    "\n",
    "## Generate label file\n",
    "LABEL_FILE=../../data/train_data/lang8+hsk/train.label  # training data\n",
    "if [ ! -f $LABEL_FILE ]; then\n",
    "    python ./utils/preprocess_data.py -s $SRC_FILE\".char\" -t $TGT_FILE\".char\" -o $LABEL_FILE --worker_num 32\n",
    "    shuf $LABEL_FILE > $LABEL_FILE\".shuf\"\n",
    "fi\n",
    "\n",
    "# Step2. Training\n",
    "CUDA_DEVICE=0\n",
    "SEED=1\n",
    "\n",
    "DEV_SET=../../data/valid_data/MuCGEC_CGED_Dev.label\n",
    "MODEL_DIR=./exps/seq2edit_lang8\n",
    "if [ ! -d $MODEL_DIR ]; then\n",
    "  mkdir -p $MODEL_DIR\n",
    "fi\n",
    "\n",
    "PRETRAIN_WEIGHTS_DIR=./plm/chinese-struct-bert-large\n",
    "\n",
    "# -p keeps the source-backup step idempotent when the pipeline is re-run\n",
    "# (plain mkdir fails if the directory already exists).\n",
    "mkdir -p ${MODEL_DIR}/src_bak\n",
    "cp ./pipeline.sh $MODEL_DIR/src_bak\n",
    "cp -r ./gector $MODEL_DIR/src_bak\n",
    "cp ./train.py $MODEL_DIR/src_bak\n",
    "cp ./predict.py $MODEL_DIR/src_bak\n",
    "\n",
    "VOCAB_PATH=./data/output_vocabulary_chinese_char_hsk+lang8_5\n",
    "\n",
    "## Freeze encoder (Cold Step)\n",
    "COLD_LR=1e-3\n",
    "COLD_BATCH_SIZE=128\n",
    "COLD_MODEL_NAME=Best_Model_Stage_1\n",
    "COLD_EPOCH=2\n",
    "\n",
    "CUDA_VISIBLE_DEVICES=$CUDA_DEVICE python train.py --tune_bert 0\\\n",
    "                --train_set $LABEL_FILE\".shuf\"\\\n",
    "                --dev_set $DEV_SET\\\n",
    "                --model_dir $MODEL_DIR\\\n",
    "                --model_name $COLD_MODEL_NAME\\\n",
    "                --vocab_path $VOCAB_PATH\\\n",
    "                --batch_size $COLD_BATCH_SIZE\\\n",
    "                --n_epoch $COLD_EPOCH\\\n",
    "                --lr $COLD_LR\\\n",
    "                --weights_name $PRETRAIN_WEIGHTS_DIR\\\n",
    "                --seed $SEED\n",
    "\n",
    "## Unfreeze encoder\n",
    "LR=1e-5\n",
    "BATCH_SIZE=32\n",
    "ACCUMULATION_SIZE=4\n",
    "MODEL_NAME=Best_Model_Stage_2\n",
    "EPOCH=20\n",
    "PATIENCE=3\n",
    "\n",
    "CUDA_VISIBLE_DEVICES=$CUDA_DEVICE python train.py --tune_bert 1\\\n",
    "                --train_set $LABEL_FILE\".shuf\"\\\n",
    "                --dev_set $DEV_SET\\\n",
    "                --model_dir $MODEL_DIR\\\n",
    "                --model_name $MODEL_NAME\\\n",
    "                --vocab_path $VOCAB_PATH\\\n",
    "                --batch_size $BATCH_SIZE\\\n",
    "                --n_epoch $EPOCH\\\n",
    "                --lr $LR\\\n",
    "                --accumulation_size $ACCUMULATION_SIZE\\\n",
    "                --patience $PATIENCE\\\n",
    "                --weights_name $PRETRAIN_WEIGHTS_DIR\\\n",
    "                --pretrain_folder $MODEL_DIR\\\n",
    "                --pretrain \"Temp_Model\"\\\n",
    "                --seed $SEED\n",
    "\n",
    "\n",
    "# Step3. Inference\n",
    "MODEL_PATH=$MODEL_DIR\"/Best_Model_Stage_2.th\"\n",
    "RESULT_DIR=$MODEL_DIR\"/results\"\n",
    "\n",
    "INPUT_FILE=../../data/test_data/MuCGEC/MuCGEC-ALL/MuCGEC_ALL_Test.input # input file\n",
    "if [ ! -f $INPUT_FILE\".char\" ]; then\n",
    "    python ../../tools/segment/segment_bert.py < $INPUT_FILE > $INPUT_FILE\".char\"  # split into characters\n",
    "fi\n",
    "if [ ! -d $RESULT_DIR ]; then\n",
    "  mkdir -p $RESULT_DIR\n",
    "fi\n",
    "OUTPUT_FILE=$RESULT_DIR\"/MuCGEC_test.output\"\n",
    "\n",
    "echo \"Generating...\"\n",
    "SECONDS=0\n",
    "CUDA_VISIBLE_DEVICES=$CUDA_DEVICE python predict.py --model_path $MODEL_PATH\\\n",
    "                  --weights_name $PRETRAIN_WEIGHTS_DIR\\\n",
    "                  --vocab_path $VOCAB_PATH\\\n",
    "                  --input_file $INPUT_FILE\".char\"\\\n",
    "                  --output_file $OUTPUT_FILE --log\n",
    "\n",
    "echo \"Generating Finish!\"\n",
    "duration=$SECONDS\n",
    "echo \"$(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed.\"\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Preprocess data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "from collections import defaultdict\n",
    "from difflib import SequenceMatcher\n",
    "import Levenshtein\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "from helpers import write_lines, read_parallel_lines, encode_verb_form, \\\n",
    "    apply_reverse_transformation, SEQ_DELIMETERS, START_TOKEN\n",
    "from multiprocessing import Pool\n",
    "\n",
    "\n",
    "def perfect_align(t, T, insertions_allowed=0,\n",
    "                  cost_function=Levenshtein.distance):\n",
    "    \"\"\"Optimally align source tokens `t` with target tokens `T` via DP.\n",
    "\n",
    "    Returns (min_cost, alignment) where each alignment entry is\n",
    "    ['INSERT' | 'REPLACE_<src_tok>', matched_target_tokens, (i, i_end)]\n",
    "    in source order.\n",
    "    \"\"\"\n",
    "    # dp[i, j, k] is a minimal cost of matching first `i` tokens of `t` with\n",
    "    # first `j` tokens of `T`, after making `k` insertions after last match of\n",
    "    # token from `t`. In other words t[:i] aligned with T[:j].\n",
    "\n",
    "    # Initialize with INFINITY (unknown)\n",
    "    shape = (len(t) + 1, len(T) + 1, insertions_allowed + 1)\n",
    "    dp = np.ones(shape, dtype=int) * int(1e9)\n",
    "    come_from = np.ones(shape, dtype=int) * int(1e9)\n",
    "    come_from_ins = np.ones(shape, dtype=int) * int(1e9)\n",
    "\n",
    "    dp[0, 0, 0] = 0  # The only known starting point. Nothing matched to nothing.\n",
    "    for i in range(len(t) + 1):  # Go inclusive\n",
    "        for j in range(len(T) + 1):  # Go inclusive\n",
    "            for q in range(insertions_allowed + 1):  # Go inclusive\n",
    "                if i < len(t):\n",
    "                    # Given matched sequence of t[:i] and T[:j], match token\n",
    "                    # t[i] with following tokens T[j:k].\n",
    "                    for k in range(j, len(T) + 1):\n",
    "                        # Target spans are joined with a 3-space delimiter —\n",
    "                        # presumably so they can be re-split on whitespace\n",
    "                        # later (see _split); TODO confirm.\n",
    "                        transform = apply_transformation(t[i], '   '.join(T[j:k]))\n",
    "                        if transform:\n",
    "                            cost = 0  # a matching g-transformation is free\n",
    "                        else:\n",
    "                            cost = cost_function(t[i], '   '.join(T[j:k]))\n",
    "                        current = dp[i, j, q] + cost\n",
    "                        if dp[i + 1, k, 0] > current:\n",
    "                            dp[i + 1, k, 0] = current\n",
    "                            come_from[i + 1, k, 0] = j\n",
    "                            come_from_ins[i + 1, k, 0] = q\n",
    "                if q < insertions_allowed:\n",
    "                    # Given matched sequence of t[:i] and T[:j], create\n",
    "                    # insertion with following tokens T[j:k].\n",
    "                    for k in range(j, len(T) + 1):\n",
    "                        cost = len('   '.join(T[j:k]))\n",
    "                        current = dp[i, j, q] + cost\n",
    "                        if dp[i, k, q + 1] > current:\n",
    "                            dp[i, k, q + 1] = current\n",
    "                            come_from[i, k, q + 1] = j\n",
    "                            come_from_ins[i, k, q + 1] = q\n",
    "\n",
    "    # Solution is in the dp[len(t), len(T), *]. Backtracking from there.\n",
    "    alignment = []\n",
    "    i = len(t)\n",
    "    j = len(T)\n",
    "    q = dp[i, j, :].argmin()\n",
    "    while i > 0 or q > 0:\n",
    "        is_insert = (come_from_ins[i, j, q] != q) and (q != 0)\n",
    "        j, k, q = come_from[i, j, q], j, come_from_ins[i, j, q]\n",
    "        if not is_insert:\n",
    "            i -= 1\n",
    "\n",
    "        if is_insert:\n",
    "            alignment.append(['INSERT', T[j:k], (i, i)])\n",
    "        else:\n",
    "            alignment.append([f'REPLACE_{t[i]}', T[j:k], (i, i + 1)])\n",
    "\n",
    "    assert j == 0\n",
    "\n",
    "    return dp[len(t), len(T)].min(), list(reversed(alignment))\n",
    "\n",
    "\n",
    "def _split(token):\n",
    "    if not token:\n",
    "        return []\n",
    "    parts = token.split()\n",
    "    return parts or [token]\n",
    "\n",
    "\n",
    "# Detect merge ($MERGE_SPACE/$MERGE_HYPHEN) and swap ($MERGE_SWAP) operations.\n",
    "def apply_merge_transformation(source_tokens, target_words, shift_idx):\n",
    "    \"\"\"Return merge/swap edits between the spans, or [] when none apply.\"\"\"\n",
    "    edits = []\n",
    "    # Several source tokens collapsing into a single target word -> merge.\n",
    "    if len(source_tokens) > 1 and len(target_words) == 1:\n",
    "        transform = check_merge(source_tokens, target_words)\n",
    "        if transform:\n",
    "            edits = [[(shift_idx + i, shift_idx + i + 1), transform]\n",
    "                     for i in range(len(source_tokens) - 1)]\n",
    "            return edits\n",
    "\n",
    "    # Exactly two tokens on both sides may be a swap.\n",
    "    if len(source_tokens) == 2 and len(target_words) == 2:\n",
    "        transform = check_swap(source_tokens, target_words)\n",
    "        if transform:\n",
    "            edits.append([(shift_idx, shift_idx + 1), transform])\n",
    "    return edits\n",
    "\n",
    "\n",
    "# delimeter: reserved separator strings used by the tagged output format\n",
    "def is_sent_ok(sent, delimeters=SEQ_DELIMETERS):\n",
    "    \"\"\"Reject sentences that already contain a non-space delimiter string.\"\"\"\n",
    "    return all(del_val == \" \" or del_val not in sent\n",
    "               for del_val in delimeters.values())\n",
    "\n",
    "\n",
    "def check_casetype(source_token, target_token):\n",
    "    if source_token.lower() != target_token.lower():\n",
    "        return None\n",
    "    if source_token.lower() == target_token:\n",
    "        return \"$TRANSFORM_CASE_LOWER\"\n",
    "    elif source_token.capitalize() == target_token:\n",
    "        return \"$TRANSFORM_CASE_CAPITAL\"\n",
    "    elif source_token.upper() == target_token:\n",
    "        return \"$TRANSFORM_CASE_UPPER\"\n",
    "    elif source_token[1:].capitalize() == target_token[1:] and source_token[0] == target_token[0]:\n",
    "        return \"$TRANSFORM_CASE_CAPITAL_1\"\n",
    "    elif source_token[:-1].upper() == target_token[:-1] and source_token[-1] == target_token[-1]:\n",
    "        return \"$TRANSFORM_CASE_UPPER_-1\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_equal(source_token, target_token):\n",
    "    if source_token == target_token:\n",
    "        return \"$KEEP\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_split(source_token, target_tokens):\n",
    "    if source_token.split(\"-\") == target_tokens:\n",
    "        return \"$TRANSFORM_SPLIT_HYPHEN\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_merge(source_tokens, target_tokens):\n",
    "    if \"\".join(source_tokens) == \"\".join(target_tokens):\n",
    "        return \"$MERGE_SPACE\"\n",
    "    elif \"-\".join(source_tokens) == \"-\".join(target_tokens):\n",
    "        return \"$MERGE_HYPHEN\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_swap(source_tokens, target_tokens):\n",
    "    if source_tokens == [x for x in reversed(target_tokens)]:\n",
    "        return \"$MERGE_SWAP\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_plural(source_token, target_token):\n",
    "    if source_token.endswith(\"s\") and source_token[:-1] == target_token:\n",
    "        return \"$TRANSFORM_AGREEMENT_SINGULAR\"\n",
    "    elif target_token.endswith(\"s\") and source_token == target_token[:-1]:\n",
    "        return \"$TRANSFORM_AGREEMENT_PLURAL\"\n",
    "    else:\n",
    "        return None\n",
    "\n",
    "\n",
    "def check_verb(source_token, target_token):\n",
    "    \"\"\"$TRANSFORM_VERB_<form> when a verb-form transformation exists, else None.\"\"\"\n",
    "    encoding = encode_verb_form(source_token, target_token)\n",
    "    return f\"$TRANSFORM_VERB_{encoding}\" if encoding else None\n",
    "\n",
    "\n",
    "def apply_transformation(source_token, target_token):\n",
    "    \"\"\"Find a g-transformation tag mapping source to target, or None.\"\"\"\n",
    "    target_tokens = target_token.split()\n",
    "    # A multi-token target can only be reached through a hyphen split.\n",
    "    if len(target_tokens) > 1:\n",
    "        split_transform = check_split(source_token, target_tokens)\n",
    "        if split_transform:\n",
    "            return split_transform\n",
    "    # Single-token checks, tried in fixed priority order.\n",
    "    for check in (check_equal, check_casetype, check_verb, check_plural):\n",
    "        transform = check(source_token, target_token)\n",
    "        if transform:\n",
    "            return transform\n",
    "    return None\n",
    "\n",
    "\n",
    "def align_sequences(args):\n",
    "    \"\"\"Align one (source_sent, target_sent) pair and tag the source tokens.\n",
    "\n",
    "    Args:\n",
    "        args: Tuple (source_sent, target_sent) of whitespace-tokenized\n",
    "            strings (packed into one argument for use with Pool.imap).\n",
    "\n",
    "    Returns:\n",
    "        (sent_with_tags, labels) on success, or None when either sentence\n",
    "        contains a reserved delimiter string.\n",
    "    \"\"\"\n",
    "    source_sent, target_sent = args\n",
    "    # check if sent is OK\n",
    "    if not is_sent_ok(source_sent) or not is_sent_ok(target_sent):\n",
    "        return None\n",
    "    source_tokens = source_sent.split()\n",
    "    target_tokens = target_sent.split()\n",
    "    matcher = SequenceMatcher(None, source_tokens, target_tokens)\n",
    "    diffs = list(matcher.get_opcodes())\n",
    "    all_edits = []\n",
    "    for idx, diff in enumerate(diffs):\n",
    "        tag, i1, i2, j1, j2 = diff\n",
    "        source_part = _split(\" \".join(source_tokens[i1:i2]))\n",
    "        target_part = _split(\" \".join(target_tokens[j1:j2]))\n",
    "        if tag == 'equal':\n",
    "            continue\n",
    "        elif tag == 'delete':\n",
    "            # delete each word in the span separately\n",
    "            for j in range(i2 - i1):\n",
    "                edit = [(i1 + j, i1 + j + 1), '$DELETE']\n",
    "                all_edits.append(edit)\n",
    "        elif tag == 'insert':\n",
    "            # append to the previous word\n",
    "            for target_token in target_part:\n",
    "                edit = ((i1 - 1, i1), f\"$APPEND_{target_token}\")\n",
    "                all_edits.append(edit)\n",
    "\n",
    "        else:\n",
    "            # check merge first of all\n",
    "            edits = apply_merge_transformation(source_part, target_part,\n",
    "                                               shift_idx=i1)\n",
    "            if edits:\n",
    "                all_edits.extend(edits)\n",
    "                continue\n",
    "\n",
    "            # normalize alignments if need (make them singleton)\n",
    "            _, alignments = perfect_align(source_part, target_part,\n",
    "                                          insertions_allowed=0)\n",
    "            for alignment in alignments:\n",
    "                new_shift = alignment[2][0]\n",
    "                edits = convert_alignments_into_edits(alignment,\n",
    "                                                      shift_idx=i1 + new_shift)  # i1: start index of this diff span in the source\n",
    "                all_edits.extend(edits)\n",
    "\n",
    "    # get labels\n",
    "    labels = convert_edits_into_labels(source_tokens, all_edits)  # convert the collected edits into per-position labels\n",
    "    # match tags to source tokens\n",
    "    sent_with_tags = add_labels_to_the_tokens(source_tokens, labels)\n",
    "\n",
    "    # Sanity check: reconstruct the target sentence from the tagged sentence.\n",
    "    check_sent = convert_tagged_line(sent_with_tags)\n",
    "\n",
    "    # If applying the labels does not reproduce the target, log the pair.\n",
    "    if \"\".join(check_sent.split()) != \"\".join(target_sent.split()):\n",
    "        # do it again for debugging\n",
    "        print(f\"Incorrect pair: \\n{source_sent}\\n{target_sent}\\n{check_sent}\")\n",
    "\n",
    "    return sent_with_tags, labels\n",
    "\n",
    "\n",
    "# 转换编辑到labels\n",
    "def convert_edits_into_labels(source_tokens, all_edits):\n",
    "    # make sure that edits are flat\n",
    "    flat_edits = []\n",
    "    for edit in all_edits:\n",
    "        (start, end), edit_operations = edit\n",
    "        if isinstance(edit_operations, list):\n",
    "            for operation in edit_operations:\n",
    "                new_edit = [(start, end), operation]\n",
    "                flat_edits.append(new_edit)\n",
    "        elif isinstance(edit_operations, str):\n",
    "            flat_edits.append(edit)\n",
    "        else:\n",
    "            raise Exception(\"Unknown operation type\")\n",
    "    all_edits = flat_edits[:]\n",
    "    labels = []\n",
    "    total_labels = len(source_tokens) + 1\n",
    "    if not all_edits:\n",
    "        labels = [[\"$KEEP\"] for x in range(total_labels)]\n",
    "    else:\n",
    "        for i in range(total_labels):\n",
    "            edit_operations = [x[1] for x in all_edits if x[0][0] == i - 1\n",
    "                               and x[0][1] == i]  # 如果这个编辑的开始字符和结束字符对的上当前源字符，那么就开始。\n",
    "            if not edit_operations:\n",
    "                labels.append([\"$KEEP\"])\n",
    "            else:\n",
    "                labels.append(edit_operations)\n",
    "    return labels\n",
    "\n",
    "\n",
    "def convert_alignments_into_edits(alignment, shift_idx):\n",
    "    \"\"\"Expand one perfect_align entry into singleton token-level edits.\n",
    "\n",
    "    Args:\n",
    "        alignment: ['REPLACE_<src_tok>' | 'INSERT', target_tokens, span]\n",
    "            entry as produced by perfect_align.\n",
    "        shift_idx: Absolute index of the source token in the full sentence.\n",
    "\n",
    "    Returns:\n",
    "        List of [(start, end), tag] edits.\n",
    "    \"\"\"\n",
    "    edits = []\n",
    "    action, target_tokens, new_idx = alignment\n",
    "    source_token = action.replace(\"REPLACE_\", \"\")\n",
    "\n",
    "    # check if delete\n",
    "    if not target_tokens:\n",
    "        edit = [(shift_idx, 1 + shift_idx), \"$DELETE\"]\n",
    "        return [edit]\n",
    "\n",
    "    # check splits: if some prefix of the target span is reachable via a\n",
    "    # transformation, apply it and $APPEND the remaining tokens.\n",
    "    for i in range(1, len(target_tokens)):\n",
    "        target_token = \" \".join(target_tokens[:i + 1])\n",
    "        transform = apply_transformation(source_token, target_token)\n",
    "        if transform:\n",
    "            edit = [(shift_idx, shift_idx + 1), transform]\n",
    "            edits.append(edit)\n",
    "            target_tokens = target_tokens[i + 1:]\n",
    "            for target in target_tokens:\n",
    "                edits.append([(shift_idx, shift_idx + 1), f\"$APPEND_{target}\"])\n",
    "            return edits\n",
    "\n",
    "    transform_costs = []\n",
    "    transforms = []\n",
    "    for target_token in target_tokens:\n",
    "        transform = apply_transformation(source_token, target_token)  # which transformation applies (equal / case / verb form / plurality)\n",
    "        if transform:  # a matching transformation costs nothing\n",
    "            cost = 0\n",
    "            transforms.append(transform)\n",
    "        else:\n",
    "            cost = Levenshtein.distance(source_token, target_token)\n",
    "            transforms.append(None)\n",
    "        transform_costs.append(cost)\n",
    "    min_cost_idx = transform_costs.index(min(transform_costs))\n",
    "    # Tokens before the cheapest match become $APPEND edits on the previous position.\n",
    "    for i in range(0, min_cost_idx):\n",
    "        target = target_tokens[i]\n",
    "        edit = [(shift_idx - 1, shift_idx), f\"$APPEND_{target}\"]\n",
    "        edits.append(edit)\n",
    "    # replace/transform target word\n",
    "    transform = transforms[min_cost_idx]\n",
    "    # No transformation was found -> plain replacement.\n",
    "    target = transform if transform is not None \\\n",
    "        else f\"$REPLACE_{target_tokens[min_cost_idx]}\"\n",
    "    edit = [(shift_idx, 1 + shift_idx), target]\n",
    "    edits.append(edit)\n",
    "    # Tokens after the cheapest match become $APPEND edits on this position.\n",
    "    for i in range(min_cost_idx + 1, len(target_tokens)):\n",
    "        target = target_tokens[i]\n",
    "        edit = [(shift_idx, 1 + shift_idx), f\"$APPEND_{target}\"]\n",
    "        edits.append(edit)\n",
    "    return edits\n",
    "\n",
    "\n",
    "def add_labels_to_the_tokens(source_tokens, labels, delimeters=SEQ_DELIMETERS):\n",
    "    \"\"\"Serialize tokens (prefixed with START_TOKEN) joined with their labels.\"\"\"\n",
    "    records = []\n",
    "    for token, label_list in zip([START_TOKEN] + source_tokens, labels):\n",
    "        joined_tags = delimeters['operations'].join(label_list)\n",
    "        records.append(token + delimeters['labels'] + joined_tags)\n",
    "    return delimeters['tokens'].join(records)\n",
    "\n",
    "\n",
    "def convert_data_from_raw_files(source_file, target_file, output_file, vocab_path, min_count, save_vocab = False, worker_num = 8):\n",
    "    \"\"\"Align parallel source/target files and write tagged training data.\n",
    "\n",
    "    Args:\n",
    "        source_file: Path to the erroneous-sentence file (one per line).\n",
    "        target_file: Path to the corrected-sentence file, aligned by line.\n",
    "        output_file: Destination path for the tagged training lines.\n",
    "        vocab_path: Directory receiving labels.txt when save_vocab is True.\n",
    "        min_count: Labels occurring at most this many times are dropped\n",
    "            from the saved label vocabulary.\n",
    "        save_vocab: Whether to write the label vocabulary.\n",
    "        worker_num: Number of multiprocessing workers for alignment.\n",
    "    \"\"\"\n",
    "    tagged = []\n",
    "    dic = defaultdict(int)  # edit-label -> occurrence count (rare labels get filtered out)\n",
    "    source_data, target_data = read_parallel_lines(source_file, target_file)\n",
    "    print(f\"The size of raw dataset is {len(source_data)}\")\n",
    "    with Pool(worker_num) as pool:\n",
    "        # total= gives tqdm a length, since zip() has no __len__.\n",
    "        pairs = tqdm(zip(source_data, target_data), total=len(source_data))\n",
    "        for result in pool.imap(align_sequences, pairs, chunksize=8):\n",
    "            # align_sequences returns None for pairs containing reserved\n",
    "            # delimiters; skip them instead of crashing on tuple unpacking.\n",
    "            if result is None:\n",
    "                continue\n",
    "            aligned_sent, align_labels = result\n",
    "            if aligned_sent and align_labels:\n",
    "                for label_list in align_labels:\n",
    "                    for label in label_list:\n",
    "                        dic[label] += 1\n",
    "                tagged.append(aligned_sent)\n",
    "\n",
    "    labels = [label for label in dic.keys() if dic[label] > min_count]\n",
    "    labels.append(\"@@UNKNOWN@@\")\n",
    "    labels.append(\"@@PADDING@@\")\n",
    "    if save_vocab:\n",
    "        # os.path.join is portable, unlike string concatenation with '/'.\n",
    "        write_lines(os.path.join(vocab_path, 'labels.txt'), labels, 'w')\n",
    "    # Write the tagged sentences out.\n",
    "    if tagged:\n",
    "        write_lines(output_file, tagged, mode='w')\n",
    "\n",
    "\n",
    "def convert_labels_into_edits(labels):\n",
    "    all_edits = []\n",
    "    for i, label_list in enumerate(labels):\n",
    "        if label_list == [\"$KEEP\"]:\n",
    "            continue\n",
    "        else:\n",
    "            edit = [(i - 1, i), label_list]\n",
    "            all_edits.append(edit)\n",
    "    return all_edits\n",
    "\n",
    "\n",
    "def get_target_sent_by_levels(source_tokens, labels):\n",
    "    \"\"\"\n",
    "    Apply per-position label lists to the source tokens in successive rounds\n",
    "    (levels), recording the intermediate token/label state of every round and\n",
    "    returning the final target sentence.\n",
    "\n",
    "    :param source_tokens: source sentence token list\n",
    "    :param labels: list of label lists, one per position (position 0 is $START)\n",
    "    :return: (leveled_target_tokens, target_sentence) where leveled_target_tokens\n",
    "             maps level -> {\"tokens\": ..., \"labels\": ...}\n",
    "    \"\"\"\n",
    "    relevant_edits = convert_labels_into_edits(labels)\n",
    "    target_tokens = source_tokens[:]\n",
    "    leveled_target_tokens = {}\n",
    "    if not relevant_edits:\n",
    "        target_sentence = \" \".join(target_tokens)\n",
    "        return leveled_target_tokens, target_sentence\n",
    "    max_level = max([len(x[1]) for x in relevant_edits])  # one source token may carry several edits; the longest list sets the number of rounds\n",
    "    for level in range(max_level):\n",
    "        rest_edits = []\n",
    "        shift_idx = 0  # running index shift caused by insertions/deletions in this round\n",
    "        for edits in relevant_edits:\n",
    "            (start, end), label_list = edits\n",
    "            label = label_list[0]\n",
    "            target_pos = start + shift_idx\n",
    "            source_token = target_tokens[target_pos] if target_pos >= 0 else START_TOKEN\n",
    "            if label == \"$DELETE\":\n",
    "                del target_tokens[target_pos]\n",
    "                shift_idx -= 1\n",
    "            elif label.startswith(\"$APPEND_\"):\n",
    "                word = label.replace(\"$APPEND_\", \"\")\n",
    "                target_tokens[target_pos + 1: target_pos + 1] = [word]\n",
    "                shift_idx += 1\n",
    "            elif label.startswith(\"$REPLACE_\"):\n",
    "                word = label.replace(\"$REPLACE_\", \"\")\n",
    "                target_tokens[target_pos] = word\n",
    "            elif label.startswith(\"$TRANSFORM\"):\n",
    "                word = apply_reverse_transformation(source_token, label)\n",
    "                if word is None:\n",
    "                    word = source_token\n",
    "                target_tokens[target_pos] = word\n",
    "            elif label.startswith(\"$MERGE_\"):\n",
    "                # apply merge only on last stage\n",
    "                if level == (max_level - 1):\n",
    "                    target_tokens[target_pos + 1: target_pos + 1] = [label]\n",
    "                    shift_idx += 1\n",
    "                else:\n",
    "                    rest_edit = [(start + shift_idx, end + shift_idx), [label]]\n",
    "                    rest_edits.append(rest_edit)\n",
    "            rest_labels = label_list[1:]  # a source token may have more than one label; defer the remaining ones to the next round\n",
    "            if rest_labels:\n",
    "                rest_edit = [(start + shift_idx, end + shift_idx), rest_labels]\n",
    "                rest_edits.append(rest_edit)\n",
    "\n",
    "        leveled_tokens = target_tokens[:]\n",
    "        # update next step\n",
    "        relevant_edits = rest_edits[:]\n",
    "        if level == (max_level - 1):  # final round: resolve the $MERGE_* marker tokens\n",
    "            leveled_tokens = replace_merge_transforms(leveled_tokens)\n",
    "\n",
    "        leveled_labels = convert_edits_into_labels(leveled_tokens,\n",
    "                                                   relevant_edits)  # convert the still-pending edits back into labels\n",
    "        leveled_target_tokens[level + 1] = {\"tokens\": leveled_tokens,\n",
    "                                            \"labels\": leveled_labels}  # record this round's tokens and labels\n",
    "\n",
    "    target_sentence = \" \".join(leveled_target_tokens[max_level][\"tokens\"])\n",
    "    return leveled_target_tokens, target_sentence\n",
    "\n",
    "\n",
    "def replace_merge_transforms(tokens):\n",
    "    \"\"\"\n",
    "    Resolve $MERGE_* marker tokens: $MERGE_SWAP exchanges its two neighbours,\n",
    "    then the markers are collapsed ('-' for $MERGE_HYPHEN, nothing for\n",
    "    $MERGE_SPACE, a single space for $MERGE_SWAP).\n",
    "\n",
    "    NOTE(review): an identical function is re-defined in a later cell of this\n",
    "    notebook; that later definition shadows this one once its cell runs.\n",
    "    \"\"\"\n",
    "    if all(not x.startswith(\"$MERGE_\") for x in tokens):\n",
    "        return tokens\n",
    "    target_tokens = tokens[:]\n",
    "    allowed_range = range(1, len(tokens) - 1)  # a swap needs a neighbour on both sides\n",
    "    for i in range(len(tokens)):\n",
    "        target_token = tokens[i]\n",
    "        if target_token.startswith(\"$MERGE\"):\n",
    "            if target_token.startswith(\"$MERGE_SWAP\") and i in allowed_range:\n",
    "                target_tokens[i - 1] = tokens[i + 1]\n",
    "                target_tokens[i + 1] = tokens[i - 1]\n",
    "    target_line = \" \".join(target_tokens)\n",
    "    target_line = target_line.replace(\" $MERGE_HYPHEN \", \"-\")\n",
    "    target_line = target_line.replace(\" $MERGE_SPACE \", \"\")\n",
    "    target_line = target_line.replace(\" $MERGE_SWAP \", \" \")\n",
    "    return target_line.split()\n",
    "\n",
    "\n",
    "# Convert a tagged line (tokens joined with their edit labels) back into the target sentence.\n",
    "def convert_tagged_line(line, delimeters=SEQ_DELIMETERS):\n",
    "    \"\"\"\n",
    "    Decode one tagged line into its corrected target sentence.\n",
    "\n",
    "    :param line: tagged line produced by the alignment step\n",
    "    :param delimeters: delimiter set separating tokens, labels and operations\n",
    "    :return: the reconstructed target sentence\n",
    "    \"\"\"\n",
    "    label_sep = delimeters['labels']\n",
    "    chunks = line.split(delimeters['tokens'])\n",
    "    source_tokens = [chunk.split(label_sep)[0] for chunk in chunks][1:]\n",
    "    labels = [chunk.split(label_sep)[1].split(delimeters['operations'])\n",
    "              for chunk in chunks]\n",
    "    assert len(source_tokens) + 1 == len(labels)\n",
    "    _, target_line = get_target_sent_by_levels(source_tokens, labels)\n",
    "    return target_line\n",
    "\n",
    "\n",
    "def main(args):\n",
    "    # Entry point: convert the raw parallel files into GECToR training data.\n",
    "    convert_data_from_raw_files(args.source, args.target, args.output_file, args.vocab_path,  args.min_count, args.save_vocab, args.worker_num)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument('-s', '--source',\n",
    "                        help='Path to the source file',\n",
    "                        required=True)  # GEC source-side file (erroneous sentences, one per line), character-tokenized\n",
    "    parser.add_argument('-t', '--target',\n",
    "                        help='Path to the target file',\n",
    "                        required=True)  # GEC target-side file (corrected sentences, one per line, aligned with source), character-tokenized\n",
    "    parser.add_argument('-o', '--output_file',\n",
    "                        help='Path to the output file',\n",
    "                        required=True)  # output file directly usable for GECToR training\n",
    "    parser.add_argument('--min_count',\n",
    "                        type=int,\n",
    "                        help='the min-count of edit labels',\n",
    "                        default=5)  # minimum frequency threshold for keeping an edit label\n",
    "    parser.add_argument('--vocab_path',\n",
    "                        help='Path to the model vocabulary directory.'\n",
    "                             'If not set then build vocab from data',\n",
    "                        default=\"../data/output_vocabulary_chinese_char_hsk+lang8_simplified_5\")  # directory where the label vocabulary is saved\n",
    "    parser.add_argument('--save_vocab',\n",
    "                        action=\"store_true\")  # whether to save the label vocabulary\n",
    "    parser.add_argument(\"--worker_num\",\n",
    "                        type=int,\n",
    "                        default=8)  # number of worker processes (multiprocessing)\n",
    "    args = parser.parse_args()\n",
    "    main(args)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Helpers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from collections import defaultdict, Counter\n",
    "from pathlib import Path\n",
    "import random\n",
    "import string\n",
    "from tqdm import tqdm\n",
    "import json\n",
    "from string import punctuation\n",
    "\n",
    "chinese_punct = \"……·——！―〉<>？｡。＂＃＄％＆＇（）＊＋，－／：《》；＜＝＞＠［’．＼］＾＿’｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏\"\n",
    "english_punct = punctuation\n",
    "letter = \"123456789abcdefghijklmnopqrstuvwxyz\"\n",
    "FILTER = [\"\\x7f\", \" \", \"\\uf0e0\", \"\\uf0a7\", \"\\u200e\", \"\\x8b\", \"\\uf0b7\", \"\\ue415\", \"\\u2060\", \"\\ue528\", \"\\ue529\", \"ᩘ\", \"\\ue074\", \"\\x8b\", \"\\u200c\", \"\\ue529\", \"\\ufeff\", \"\\u200b\", \"\\ue817\", \"\\xad\", '\\u200f', '️', '่', '︎']\n",
    "VOCAB_DIR = Path(__file__).resolve().parent.parent / \"data\"\n",
    "PAD = \"@@PADDING@@\"\n",
    "UNK = \"@@UNKNOWN@@\"\n",
    "START_TOKEN = \"$START\"\n",
    "SEQ_DELIMETERS = {\"tokens\": \" \",\n",
    "                  \"labels\": \"SEPL|||SEPR\",\n",
    "                  \"operations\": \"SEPL__SEPR\",\n",
    "                  \"pos_tags\": \"SEPL---SEPR\"}  # 分隔符，其中，如果一个source token被多次编辑，那么这些编辑label之间用\"SEPL__SEPR\"相分割\n",
    "PUNCT = chinese_punct + english_punct + letter + letter.upper()\n",
    "\n",
    "def split_char(line):\n",
    "    \"\"\"\n",
    "    将中文按照字分开，英文按照词分开\n",
    "    :param line: 输入句子\n",
    "    :return: 分词后的句子\n",
    "    \"\"\"\n",
    "    english = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n",
    "    output = []\n",
    "    buffer = \"\"\n",
    "    chinese_punct = \"！？｡＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏.\"\n",
    "    for s in line:\n",
    "        if s in english or s in english.upper() or s in string.punctuation or s in chinese_punct:  # 英文或数字或标点不分\n",
    "            buffer += s\n",
    "        else:  # 中文或空格分\n",
    "            if buffer and buffer != \" \":\n",
    "                output.append(buffer)\n",
    "            buffer = \"\"\n",
    "            if s != \" \":\n",
    "                output.append(s)\n",
    "    if buffer:\n",
    "        output.append(buffer)\n",
    "    return output\n",
    "\n",
    "def get_verb_form_dicts():\n",
    "    \"\"\"\n",
    "    Build the encode/decode maps for English verb-form transformations from\n",
    "    the verb-form-vocab.txt dictionary (entries look like likes_like:VBZ_VB).\n",
    "\n",
    "    :return: (encode, decode) where\n",
    "             encode maps word pairs to tag pairs, e.g. likes_like -> VBZ_VB\n",
    "             decode maps word_tag1_tag2 keys to target words, e.g. likes_VBZ_VB -> like\n",
    "    \"\"\"\n",
    "    encode, decode = {}, {}\n",
    "    dict_path = os.path.join(VOCAB_DIR, \"verb-form-vocab.txt\")\n",
    "    with open(dict_path, encoding=\"utf-8\") as fh:\n",
    "        for raw_line in fh:\n",
    "            words, tags = raw_line.split(\":\")\n",
    "            word1, word2 = words.split(\"_\")\n",
    "            tag1, tag2 = tags.split(\"_\")\n",
    "            decode_key = f\"{word1}_{tag1}_{tag2.strip()}\"\n",
    "            if decode_key in decode:\n",
    "                continue  # first occurrence wins\n",
    "            encode[words] = tags\n",
    "            decode[decode_key] = word2\n",
    "    return encode, decode\n",
    "\n",
    "\n",
    "# Verb-form transformation encoder/decoder tables, built once at import time.\n",
    "ENCODE_VERB_DICT, DECODE_VERB_DICT = get_verb_form_dicts()\n",
    "\n",
    "\n",
    "def get_target_sent_by_edits(source_tokens, edits):\n",
    "    \"\"\"\n",
    "    Apply span-level edit operations to the source token list to produce the\n",
    "    target token list.\n",
    "\n",
    "    :param source_tokens: source sentence token list\n",
    "    :param edits: sequence of (start, end, label, _) edit tuples\n",
    "    :return: target sentence token list\n",
    "    \"\"\"\n",
    "    target_tokens = source_tokens[:]\n",
    "    shift_idx = 0  # running index shift caused by insertions/deletions\n",
    "    for edit in edits:\n",
    "        start, end, label, _ = edit\n",
    "        target_pos = start + shift_idx\n",
    "        source_token = target_tokens[target_pos] \\\n",
    "            if len(target_tokens) > target_pos >= 0 else ''\n",
    "        if label == \"\":\n",
    "            # an empty label deletes the token\n",
    "            if target_tokens:\n",
    "                del target_tokens[target_pos]\n",
    "                shift_idx -= 1\n",
    "        elif start == end:\n",
    "            word = label.replace(\"$APPEND_\", \"\")  # append operation\n",
    "            target_tokens[target_pos: target_pos] = [word]\n",
    "            shift_idx += 1\n",
    "        elif label.startswith(\"$TRANSFORM_\"):  # transformation operation\n",
    "            word = apply_reverse_transformation(source_token, label)\n",
    "            if word is None:\n",
    "                word = source_token\n",
    "            target_tokens[target_pos] = word\n",
    "        elif start == end - 1:  # replace operation\n",
    "            word = label.replace(\"$REPLACE_\", \"\")\n",
    "            target_tokens[target_pos] = word\n",
    "        elif label.startswith(\"$MERGE_\"):  # merge operation\n",
    "            target_tokens[target_pos + 1: target_pos + 1] = [label]\n",
    "            shift_idx += 1\n",
    "\n",
    "    return replace_merge_transforms(target_tokens)  # resolve the $MERGE_* marker tokens inserted above\n",
    "\n",
    "\n",
    "def replace_merge_transforms(tokens):\n",
    "    \"\"\"\n",
    "    对序列应用Merge变形编辑（将当前token与下一个token合并）\n",
    "    :param tokens: 词序列列表\n",
    "    :return: Merge完成后的序列列表\n",
    "    \"\"\"\n",
    "    if all(not x.startswith(\"$MERGE_\") for x in tokens):\n",
    "        return tokens\n",
    "    target_tokens = tokens[:]\n",
    "    allowed_range = range(1, len(tokens) - 1)\n",
    "    for i in range(len(tokens)):\n",
    "        target_token = tokens[i]\n",
    "        if target_token.startswith(\"$MERGE\"):\n",
    "            if target_token.startswith(\"$MERGE_SWAP\") and i in allowed_range:\n",
    "                target_tokens[i - 1] = tokens[i + 1]\n",
    "                target_tokens[i + 1] = tokens[i - 1]\n",
    "    target_line = \" \".join(target_tokens)\n",
    "    target_line = target_line.replace(\" $MERGE_HYPHEN \", \"-\")\n",
    "    target_line = target_line.replace(\" $MERGE_SPACE \", \"\")\n",
    "    target_line = target_line.replace(\" $MERGE_SWAP \", \" \")\n",
    "    return target_line.split()\n",
    "\n",
    "\n",
    "def convert_using_case(token, smart_action):\n",
    "    \"\"\"\n",
    "    对当前token进行大小写变换\n",
    "    :param token: 当前token\n",
    "    :param smart_action: 编辑操作标签\n",
    "    :return: 编辑完成后的token\n",
    "    \"\"\"\n",
    "    if not smart_action.startswith(\"$TRANSFORM_CASE_\"):\n",
    "        return token\n",
    "    if smart_action.endswith(\"LOWER\"):\n",
    "        return token.lower()\n",
    "    elif smart_action.endswith(\"UPPER\"):\n",
    "        return token.upper()\n",
    "    elif smart_action.endswith(\"CAPITAL\"):\n",
    "        return token.capitalize()\n",
    "    elif smart_action.endswith(\"CAPITAL_1\"):\n",
    "        return token[0] + token[1:].capitalize()\n",
    "    elif smart_action.endswith(\"UPPER_-1\"):\n",
    "        return token[:-1].upper() + token[-1]\n",
    "    else:\n",
    "        return token\n",
    "\n",
    "\n",
    "def convert_using_verb(token, smart_action):\n",
    "    \"\"\"\n",
    "    Apply a $TRANSFORM_VERB_* edit label (verb-form change) to a token.\n",
    "\n",
    "    :param token: current token\n",
    "    :param smart_action: edit label, e.g. $TRANSFORM_VERB_VBZ_VB\n",
    "    :return: transformed token, or None if the form is not in the dictionary\n",
    "    :raises Exception: if the label is not a $TRANSFORM_VERB_* action\n",
    "    \"\"\"\n",
    "    prefix = \"$TRANSFORM_VERB_\"\n",
    "    if not smart_action.startswith(prefix):\n",
    "        raise Exception(f\"Unknown action type {smart_action}\")\n",
    "    return decode_verb_form(f\"{token}_{smart_action[len(prefix):]}\")\n",
    "\n",
    "\n",
    "def convert_using_split(token, smart_action):\n",
    "    \"\"\"\n",
    "    对当前token进行切分（去掉连字符-）\n",
    "    :param token: 当前token\n",
    "    :param smart_action: 编辑操作标签\n",
    "    :return: 编辑完成后的token\n",
    "    \"\"\"\n",
    "    key_word = \"$TRANSFORM_SPLIT\"\n",
    "    if not smart_action.startswith(key_word):\n",
    "        raise Exception(f\"Unknown action type {smart_action}\")\n",
    "    target_words = token.split(\"-\")\n",
    "    return \" \".join(target_words)\n",
    "\n",
    "\n",
    "# TODO 单复数变换不止有加s，还有加es的情况？\n",
    "def convert_using_plural(token, smart_action):\n",
    "    \"\"\"\n",
    "    对当前token进行单复数变换\n",
    "    :param token: 当前token\n",
    "    :param smart_action: 编辑操作标签\n",
    "    :return: 编辑完成后的token\n",
    "    \"\"\"\n",
    "    if smart_action.endswith(\"PLURAL\"):\n",
    "        return token + \"s\"\n",
    "    elif smart_action.endswith(\"SINGULAR\"):\n",
    "        return token[:-1]\n",
    "    else:\n",
    "        raise Exception(f\"Unknown action type {smart_action}\")\n",
    "\n",
    "\n",
    "def apply_reverse_transformation(source_token, transform):\n",
    "    \"\"\"\n",
    "    Dispatch a $TRANSFORM_* edit label to the matching token converter.\n",
    "\n",
    "    :param source_token: token the transformation is applied to\n",
    "    :param transform: edit label; non-$TRANSFORM labels leave the token unchanged\n",
    "    :return: the transformed token\n",
    "    :raises Exception: for an unrecognized $TRANSFORM label\n",
    "    \"\"\"\n",
    "    if not transform.startswith(\"$TRANSFORM\"):\n",
    "        return source_token\n",
    "    # deal with case\n",
    "    if transform.startswith(\"$TRANSFORM_CASE\"):\n",
    "        return convert_using_case(source_token, transform)\n",
    "    # deal with verb\n",
    "    if transform.startswith(\"$TRANSFORM_VERB\"):\n",
    "        return convert_using_verb(source_token, transform)\n",
    "    # deal with split\n",
    "    if transform.startswith(\"$TRANSFORM_SPLIT\"):\n",
    "        return convert_using_split(source_token, transform)\n",
    "    # deal with single/plural\n",
    "    if transform.startswith(\"$TRANSFORM_AGREEMENT\"):\n",
    "        return convert_using_plural(source_token, transform)\n",
    "    # The old `transform == \"$KEEP\"` branch was unreachable here, since\n",
    "    # \"$KEEP\" never starts with \"$TRANSFORM\"; it has been removed.\n",
    "    raise Exception(f\"Unknown action type {transform}\")\n",
    "\n",
    "\n",
    "def read_parallel_lines(fn1, fn2):\n",
    "    \"\"\"\n",
    "    Read a parallel corpus, dropping pairs where either side is blank.\n",
    "\n",
    "    :param fn1: source-side file (sentences before correction)\n",
    "    :param fn2: target-side file (sentences after correction)\n",
    "    :return: two aligned lists of source and target sentences\n",
    "    \"\"\"\n",
    "    src_lines = read_lines(fn1, skip_strip=True)\n",
    "    tgt_lines = read_lines(fn2, skip_strip=True)\n",
    "    assert len(src_lines) == len(tgt_lines), print(len(src_lines), len(tgt_lines))\n",
    "    kept_src, kept_tgt = [], []\n",
    "    for src, tgt in zip(src_lines, tgt_lines):\n",
    "        if src.strip() and tgt.strip():\n",
    "            kept_src.append(src)\n",
    "            kept_tgt.append(tgt)\n",
    "    return kept_src, kept_tgt\n",
    "\n",
    "\n",
    "def read_lines(fn, skip_strip=False):\n",
    "    \"\"\"\n",
    "    从文件中读取每一行\n",
    "    :param fn: 文件路径\n",
    "    :param skip_strip: 是否跳过空行\n",
    "    :return: 包含文件中每一行的列表\n",
    "    \"\"\"\n",
    "    if not os.path.exists(fn):\n",
    "        return []\n",
    "    with open(fn, 'r', encoding='utf-8') as f:\n",
    "        lines = f.readlines()\n",
    "    return [s.strip() for s in lines if s.strip() or skip_strip]\n",
    "\n",
    "def write_lines(fn, lines, mode='w'):\n",
    "    \"\"\"\n",
    "    将数据写入到文件中\n",
    "    :param fn: 输出文件路径\n",
    "    :param lines: 需要写入的数据\n",
    "    :param mode: 写入的模式（w、a等）\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if mode == 'w' and os.path.exists(fn):\n",
    "        os.remove(fn)\n",
    "    with open(fn, encoding='utf-8', mode=mode) as f:\n",
    "        f.writelines(['%s\\n' % s for s in lines])\n",
    "\n",
    "\n",
    "def decode_verb_form(original):\n",
    "    # Look up the target verb form for an encoded key like \"word_tag1_tag2\"; None if unknown.\n",
    "    return DECODE_VERB_DICT.get(original)\n",
    "\n",
    "\n",
    "def encode_verb_form(original_word, corrected_word):\n",
    "    \"\"\"\n",
    "    Look up the transformation tag that rewrites original_word into\n",
    "    corrected_word; returns None when the pair is not in the verb dictionary.\n",
    "    \"\"\"\n",
    "    lookup_key = original_word + \"_\" + corrected_word\n",
    "    response = ENCODE_VERB_DICT.get(lookup_key, \"\").strip()\n",
    "    return response if (original_word and response) else None"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
