{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "shellscript"
    }
   },
   "outputs": [],
   "source": [
    "# Three output files from the Seq2Edit model, trained/predicted with 3 different random seeds\n",
    "RESULT_FILE1=$1  # output file of the 1st Seq2Edit model\n",
    "RESULT_FILE2=$2  # output file of the 2nd Seq2Edit model\n",
    "RESULT_FILE3=$3  # output file of the 3rd Seq2Edit model\n",
    "\n",
    "# Three output files from the Seq2Seq model, likewise with 3 different random seeds\n",
    "RESULT_FILE4=$4  # output file of the 1st Seq2Seq model\n",
    "RESULT_FILE5=$5  # output file of the 2nd Seq2Seq model\n",
    "RESULT_FILE6=$6  # output file of the 3rd Seq2Seq model\n",
    "\n",
    "# The block below is commented out; it sweeps over candidate thresholds 1..6\n",
    "# for ((i=1; i<=6; i++))\n",
    "# do\n",
    "#     THRESHOLD=$i  # threshold grows from 1 to 6\n",
    "#     RESULT_DIR=./ensemble_results/3seq2edit_3seq2seq_threshold_$THRESHOLD  # result directory for the current threshold\n",
    "#     mkdir -p $RESULT_DIR  # directory that stores the ensemble results\n",
    "\n",
    "#     M2_OUTPUT_FILE=$RESULT_DIR/MuCGEC_test.m2_temp  # intermediate .m2 file holding the merged edits\n",
    "#     OUTPUT_FILE=$RESULT_DIR/MuCGEC_test.output  # final output file\n",
    "\n",
    "#     # Run edit_ensemble.py to merge the predictions of all models into m2 format\n",
    "#     python edit_ensemble.py --result_path $RESULT_FILE1 $RESULT_FILE2 $RESULT_FILE3 $RESULT_FILE4 $RESULT_FILE5 $RESULT_FILE6 --output_path $M2_OUTPUT_FILE --threshold $THRESHOLD\n",
    "\n",
    "#     # Convert the intermediate m2 result into the final output format\n",
    "#     python m2convertor.py -f $M2_OUTPUT_FILE -o $OUTPUT_FILE\n",
    "# done\n",
    "\n",
    "# Ensemble strategy actually used: fixed threshold of 4\n",
    "THRESHOLD=4  # keep an edit only when at least 4 of the 6 models propose it\n",
    "\n",
    "# Directory for the ensemble results; its name encodes the method (3 Seq2Edit + 3 Seq2Seq) and the threshold\n",
    "RESULT_DIR=./ensemble_results/3seq2edit_3seq2seq_threshold_$THRESHOLD\n",
    "mkdir -p $RESULT_DIR  # create the result directory\n",
    "\n",
    "# Paths of the intermediate and final result files\n",
    "M2_OUTPUT_FILE=$RESULT_DIR/MuCGEC_test.m2_temp  # temporary .m2 file holding the merged edits\n",
    "OUTPUT_FILE=$RESULT_DIR/MuCGEC_test.output  # final output file in the required format\n",
    "\n",
    "# Run rule_ensemble.py to fuse the predictions of the 6 models\n",
    "python rule_ensemble.py --result_path $RESULT_FILE1 $RESULT_FILE2 $RESULT_FILE3 $RESULT_FILE4 $RESULT_FILE5 $RESULT_FILE6 --output_path $M2_OUTPUT_FILE --threshold $THRESHOLD\n",
    "# Arguments:\n",
    "# --result_path: prediction files of the 6 models (3 Seq2Edit + 3 Seq2Seq)\n",
    "# --output_path: path of the temporary .m2 output file\n",
    "# --threshold: how many models must agree before an edit is kept (4 here)\n",
    "\n",
    "# Convert the merged .m2 file into the final output format\n",
    "python m2convertor.py -f $M2_OUTPUT_FILE -o $OUTPUT_FILE\n",
    "# Arguments:\n",
    "# -f: input .m2 file produced by rule_ensemble.py\n",
    "# -o: final output file\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### rule_ensemble.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "from collections import Counter\n",
    "from modules.classifier import check_spell_error\n",
    "from tqdm import tqdm\n",
    "\n",
    "def parse_m2(filename):\n",
    "    \"\"\"Parse an m2-format file into sources and edit lines.\n",
    "\n",
    "    Entries are separated by blank lines.  Within one entry, line 0 is the\n",
    "    source sentence and annotation lines are read from index 2 onward —\n",
    "    index 1 is skipped (presumably a target/T line; confirm against the\n",
    "    actual input files).\n",
    "\n",
    "    Arguments:\n",
    "        filename -- path of the m2 file\n",
    "\n",
    "    Returns:\n",
    "        (sources, edits) -- parallel lists: the source line and the list\n",
    "        of \"A ...\" annotation lines of every entry.\n",
    "    \"\"\"\n",
    "    sources = []\n",
    "    edits = []\n",
    "    with open(filename, \"r\") as f:\n",
    "        chunk = []\n",
    "        for line in f:\n",
    "            if line == \"\\n\":  # blank line: flush the finished entry\n",
    "                sources.append(chunk[0])\n",
    "                edit_list = []\n",
    "                for s in chunk[2:]:\n",
    "                    if s[0] != \"A\": break  # stop at the first non-annotation line\n",
    "                    edit_list.append(s)\n",
    "                edits.append(edit_list)\n",
    "                chunk = []\n",
    "            else:\n",
    "                chunk.append(line.rstrip(\"\\n\"))\n",
    "        if chunk:  # flush the last entry when the file has no trailing blank line\n",
    "            sources.append(chunk[0])\n",
    "            edit_list = []\n",
    "            for s in chunk[2:]:\n",
    "                if s[0] != \"A\": break\n",
    "                edit_list.append(s)\n",
    "            edits.append(edit_list)\n",
    "    return sources, edits\n",
    "        \n",
    "\n",
    "def validate(edits):\n",
    "    \"\"\"Resolve conflicts between the candidate edits of one sentence.\n",
    "\n",
    "    Arguments:\n",
    "        edits -- non-empty list of (edit_line, vote_count) tuples\n",
    "\n",
    "    Returns:\n",
    "        A list of edit lines with overlapping or duplicated spans removed,\n",
    "        preferring higher-voted edits when spans collide.\n",
    "    \"\"\"\n",
    "    edits_with_pos = []\n",
    "    for edit, times in edits:\n",
    "        _, ss, se = edit.split(\"|||\")[0].split(\" \")\n",
    "        ss, se = int(ss), int(se)\n",
    "        edits_with_pos.append((ss, se, edit, times))\n",
    "    # Sort by start position ascending; on ties, by vote count descending.\n",
    "    # BUG FIX: the key previously read the leftover loop variable `times`\n",
    "    # (a constant after the loop above), so the vote-count tie-break never\n",
    "    # took effect; use the tuple's own count x[3] instead.\n",
    "    edits_with_pos.sort(key=lambda x: (x[0], -x[3]))\n",
    "    final_edits = [edits_with_pos[0][2]]\n",
    "    for i in range(1, len(edits_with_pos)):\n",
    "        if edits_with_pos[i][0] < edits_with_pos[i-1][1]:  # overlapping span: drop this edit\n",
    "            edits_with_pos[i] = edits_with_pos[i-1]  # compare the next span against the kept one\n",
    "            continue\n",
    "        if edits_with_pos[i][0] == edits_with_pos[i-1][0] and edits_with_pos[i][1] == edits_with_pos[i-1][1]:\n",
    "            edits_with_pos[i] = edits_with_pos[i-1]  # identical span: keep only the first (higher-voted) edit\n",
    "            continue\n",
    "        final_edits.append(edits_with_pos[i][-2])\n",
    "    final_final_edits = []\n",
    "    for e in final_edits:  # drop consecutive duplicate edit lines\n",
    "        if len(final_final_edits) == 0 or e != final_final_edits[-1]:\n",
    "            final_final_edits.append(e)\n",
    "    return final_final_edits\n",
    "\n",
    "\n",
    "def main(args):\n",
    "    \"\"\"Merge the m2 predictions of several models by vote counting.\n",
    "\n",
    "    For every sentence, edits proposed by at least args.threshold models\n",
    "    are kept (word-order and spelling edits get a relaxed threshold of\n",
    "    threshold-1), conflicts are resolved by validate(), and the result is\n",
    "    written to args.output_path in m2 format.\n",
    "    \"\"\"\n",
    "    total_edits = []\n",
    "    for f in args.result_path:\n",
    "        sources, edits = parse_m2(f)\n",
    "        total_edits.append(edits)\n",
    "    # NOTE(review): `sources` leaks out of the loop, so the source sentences\n",
    "    # of the LAST result file are used below — correct only if every input\n",
    "    # file shares identical sources; raises NameError if result_path is empty.\n",
    "    with open(args.output_path, \"w\", encoding=\"utf-8\") as o:\n",
    "        for i in tqdm(range(len(sources))):\n",
    "            src = sources[i]\n",
    "            src_tokens = src.split(\" \")[1:]  # drop the leading \"S\" tag of the m2 source line\n",
    "            edit_candidates = []\n",
    "            for edits in total_edits:\n",
    "                edit_candidates.extend(edits[i])\n",
    "            final_edits = []\n",
    "            c = Counter(edit_candidates)  # votes per identical edit line\n",
    "            if c[\"A -1 -1|||noop|||-NONE-|||REQUIRED|||-NONE-|||0\"] > (6 - args.threshold):  # enough models say \"no error\" (6 models hard-coded)\n",
    "                out = src + \"\\n\" + \"A -1 -1|||noop|||-NONE-|||REQUIRED|||-NONE-|||0\" + \"\\n\\n\"\n",
    "                o.write(out)\n",
    "                continue\n",
    "            for k, v in c.items():\n",
    "                if v >= args.threshold:\n",
    "                    if k != \"A -1 -1|||noop|||-NONE-|||REQUIRED|||-NONE-|||0\":\n",
    "                        final_edits.append((k, v))\n",
    "                if \"|||W|||\" in k and v >= args.threshold - 1:  # relaxed threshold for word-order edits\n",
    "                    # NOTE(review): when v >= threshold this appends k a second\n",
    "                    # time; validate() later drops the duplicated span.\n",
    "                    final_edits.append((k, v))\n",
    "                if \"|||S|||\" in k and v >= args.threshold - 1:  # relaxed threshold for spelling edits\n",
    "                    _, ss, se = k.split(\"|||\")[0].split(\" \")\n",
    "                    src_span = \"\".join(src_tokens[int(ss): int(se)])\n",
    "                    tgt_span = k.split(\"|||\")[2].replace(\" \", \"\")\n",
    "                    if check_spell_error(src_span, tgt_span):  # only keep plausible spelling fixes\n",
    "                        final_edits.append((k, v))\n",
    "            if final_edits:\n",
    "                final_edits = validate(final_edits)\n",
    "                out = src + \"\\n\" + \"\\n\".join(final_edits) + \"\\n\\n\"\n",
    "            else:  # nothing survived the vote: emit the noop annotation\n",
    "                out = src + \"\\n\" + \"A -1 -1|||noop|||-NONE-|||REQUIRED|||-NONE-|||0\" + \"\\n\\n\"\n",
    "            o.write(out)\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Command-line interface: merge several m2 result files into one.\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument('--result_path',\n",
    "                        help='Path to the result file.', nargs='+',\n",
    "                        required=True)\n",
    "    parser.add_argument('--output_path',\n",
    "                        help='Path to the output file.',\n",
    "                        required=True)\n",
    "    parser.add_argument('-T', '--threshold',\n",
    "                        help='Threshold.',\n",
    "                        type=int,\n",
    "                        default=2)\n",
    "    args = parser.parse_args()\n",
    "    main(args)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### m2convertor.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding:UTF-8 -*-\n",
    "# @Author: Xuezhi Fang\n",
    "# @Date: 2020-06-19\n",
    "# @Email: jasonfang3900@gmail.com\n",
    "\n",
    "import argparse\n",
    "import re\n",
    "\n",
    "\n",
    "class M2Processor():\n",
    "    \"\"\"Apply the edits of one m2 entry to its source sentence.\"\"\"\n",
    "    def __init__(self, src_sent, edit_lines):\n",
    "        # src_sent: space-tokenized source sentence (without the \"S \" prefix)\n",
    "        # edit_lines: \"A ...\" annotation lines (without the \"A \" prefix)\n",
    "        self.src_sent = src_sent\n",
    "        self.edit_lines = edit_lines\n",
    "        self.edits = {}  # annotator id -> list of {span, op, res} dicts\n",
    "        self.trg_sents = []  # one corrected sentence per annotator\n",
    "        \n",
    "    def conv_edit(self, line):\n",
    "        \"\"\"Parse one edit line into (editor, tag, span, result).\n",
    "\n",
    "        Returns None for the noop annotation (span starting at -1).\n",
    "        \"\"\"\n",
    "        line = line.strip().split(\"|||\")\n",
    "        edit_span = line[0].split(\" \")\n",
    "        edit_span = (int(edit_span[0]), int(edit_span[1]))\n",
    "        edit_res = line[2]  # replacement text\n",
    "        editor = line[-1]  # annotator id (last m2 field)\n",
    "        if edit_span[0] == -1:  # noop annotation: nothing to apply\n",
    "            return None\n",
    "        if edit_span[0] == edit_span[1]:\n",
    "            edit_tag = \"ADD\"  # empty span: insertion\n",
    "        elif edit_res == \"-NONE-\" or edit_res == \"\":\n",
    "            edit_tag = \"DEL\"  # no replacement text: deletion\n",
    "        else:\n",
    "            edit_tag = \"REP\"  # otherwise: replacement\n",
    "        return editor, edit_tag, edit_span, edit_res\n",
    "    \n",
    "    def get_edits(self):\n",
    "        \"\"\"Group the parsed edit lines by annotator into self.edits.\"\"\"\n",
    "        for line in self.edit_lines:\n",
    "            if line:\n",
    "                edit_item = self.conv_edit(line)\n",
    "                if not edit_item:\n",
    "                    continue\n",
    "                editor, edit_tag, edit_span, edit_res = edit_item\n",
    "                if editor not in self.edits:\n",
    "                    self.edits[editor] = []\n",
    "                self.edits[editor].append({\"span\": edit_span, \"op\": edit_tag, \"res\": edit_res})\n",
    "                \n",
    "    def get_para(self):\n",
    "        \"\"\"Apply each annotator's edits and return the corrected sentences.\n",
    "\n",
    "        Edited positions are replaced by \" \" placeholder tokens so that the\n",
    "        original token indices stay valid while later edits are applied;\n",
    "        the surplus spaces are collapsed at the end.  Returns [src_sent]\n",
    "        when there are no edits.\n",
    "        \"\"\"\n",
    "        self.get_edits()\n",
    "        if self.edits:\n",
    "            for editor in self.edits:\n",
    "                sent = self.src_sent.split(\" \")\n",
    "                for edit_item in self.edits[editor]:\n",
    "                    edit_span, edit_tag, trg_tokens = edit_item[\"span\"], edit_item[\"op\"], edit_item[\"res\"]\n",
    "                    if edit_tag == \"DEL\":\n",
    "                        sent[edit_span[0]:edit_span[1]] = [\" \" for _ in range(edit_span[1] - edit_span[0])]\n",
    "                    else:\n",
    "                        if edit_tag == \"ADD\":\n",
    "                            if edit_span[0] != 0:\n",
    "                                sent[edit_span[0]-1] += \" \" + trg_tokens  # append after the previous token\n",
    "                            else:\n",
    "                                sent[edit_span[0]] = trg_tokens + \" \" + sent[edit_span[0]]  # insert at sentence start\n",
    "                        elif edit_tag == \"REP\":\n",
    "                            src_tokens_len = len(sent[edit_span[0]:edit_span[1]])\n",
    "                            sent[edit_span[0]:edit_span[1]] = [trg_tokens] + [\" \" for _ in range(src_tokens_len-1)]\n",
    "                sent = \" \".join(sent).strip()\n",
    "                res_sent = re.sub(\" +\", \" \", sent)  # collapse the placeholder spaces\n",
    "                self.trg_sents.append(res_sent)\n",
    "            return self.trg_sents\n",
    "        else:\n",
    "            return [self.src_sent]\n",
    "\n",
    "    \n",
    "def read_file():\n",
    "    \"\"\"Yield (src_sent, edit_lines) for every entry of the m2 file args.f.\n",
    "\n",
    "    Entries are separated by blank lines; the \"S \" / \"A \" prefixes are\n",
    "    stripped from the yielded strings.  Note: the yielded edit_lines list\n",
    "    is reused and cleared between entries, so consumers must process it\n",
    "    before advancing the generator (main() does).\n",
    "    \"\"\"\n",
    "    src_sent = None\n",
    "    edit_lines = []\n",
    "    with open(args.f, \"r\", encoding=\"utf8\") as fr:\n",
    "        for line in fr:\n",
    "            line = line.strip()\n",
    "            if line.startswith(\"S \"):\n",
    "                src_sent = line.replace(\"S \", \"\", 1)\n",
    "            elif line.startswith(\"A \"):\n",
    "                edit_lines.append(line.replace(\"A \", \"\", 1))\n",
    "            elif line == \"\":\n",
    "                # BUG FIX: only yield when an entry was actually read —\n",
    "                # consecutive blank lines used to re-yield the previous\n",
    "                # src_sent paired with an empty edit list.\n",
    "                if src_sent is not None:\n",
    "                    yield src_sent, edit_lines\n",
    "                src_sent = None\n",
    "                edit_lines.clear()\n",
    "    # BUG FIX: flush the last entry when the file lacks a trailing blank line.\n",
    "    if src_sent is not None:\n",
    "        yield src_sent, edit_lines\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"Convert the m2 file args.f into plain corrected sentences in args.o.\n",
    "\n",
    "    Only the first annotator's corrected sentence of each entry is written,\n",
    "    one sentence per line.\n",
    "    \"\"\"\n",
    "    # Use a context manager so the output file is closed even on error;\n",
    "    # the unused `counter` / `prefix_counter` locals were removed.\n",
    "    with open(args.o, \"w\", encoding=\"utf8\") as fw_trg:\n",
    "        for src_sent, edit_lines in read_file():\n",
    "            m2_item = M2Processor(src_sent, edit_lines)\n",
    "            trg_sents = m2_item.get_para()\n",
    "            fw_trg.write(trg_sents[0]+\"\\n\")\n",
    " \n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Command-line interface: convert an m2 file to plain-text output.\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"-f\", help=\"m2 file\")\n",
    "    parser.add_argument(\"-o\", help=\"output file\")\n",
    "    args = parser.parse_args()\n",
    "    main()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### check spell errors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from utils.char_smi import CharFuncs\n",
    "from collections import namedtuple\n",
    "from pypinyin import pinyin, Style\n",
    "import os\n",
    "Correction = namedtuple(\n",
    "    \"Correction\",\n",
    "    [\n",
    "        \"op\",    # error-type label, e.g. \"W\", \"R:NOUN\", \"S:SPELL\"\n",
    "        \"toks\",  # target tokens (\"-NONE-\" for deletions)\n",
    "        \"inds\",  # (start, end) token indices in the source sentence\n",
    "    ],\n",
    ") \n",
    "# NOTE(review): __file__ is undefined inside a notebook kernel — these two\n",
    "# lines only work when run as the modules/classifier.py file they came from.\n",
    "file_path = os.path.dirname(os.path.abspath(__file__))\n",
    "char_smi = CharFuncs(os.path.join(file_path.replace(\"modules\", \"\"), 'data/char_meta.txt'))\n",
    "\n",
    "def check_spell_error(src_span: str,\n",
    "                      tgt_span: str,\n",
    "                      threshold: float = 0.8) -> bool:\n",
    "    \"\"\"Decide whether replacing src_span with tgt_span is a spelling fix.\n",
    "\n",
    "    True when both spans have the same length and are either anagrams of\n",
    "    each other, or every differing character pair is similar enough in\n",
    "    shape+pronunciation (combined score >= threshold) or shares at least\n",
    "    one pinyin reading.\n",
    "    \"\"\"\n",
    "    if len(src_span) != len(tgt_span):\n",
    "        return False\n",
    "    if sorted(src_span) == sorted(tgt_span):  # same characters, reordered within the word\n",
    "        return True\n",
    "    for s_ch, t_ch in zip(src_span, tgt_span):\n",
    "        if s_ch == t_ch:\n",
    "            continue\n",
    "        if s_ch not in char_smi.data or t_ch not in char_smi.data:\n",
    "            return False  # unknown character: cannot judge similarity\n",
    "        shape_sim = char_smi.shape_similarity(s_ch, t_ch)\n",
    "        sound_sim = char_smi.pronunciation_similarity(s_ch, t_ch)\n",
    "        if shape_sim + sound_sim < threshold:\n",
    "            src_readings = set(pinyin(s_ch, style=Style.NORMAL, heteronym=True)[0])\n",
    "            tgt_readings = set(pinyin(t_ch, style=Style.NORMAL, heteronym=True)[0])\n",
    "            if not (src_readings & tgt_readings):\n",
    "                return False\n",
    "    return True\n",
    "\n",
    "class Classifier:\n",
    "    \"\"\"\n",
    "    Error-type classifier for grammatical-error-correction edits.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 granularity: str = \"word\"):\n",
    "\n",
    "        # \"word\": fine-grained POS-based error types; otherwise char-level tags\n",
    "        self.granularity = granularity\n",
    "\n",
    "    @staticmethod\n",
    "    def get_pos_type(pos):\n",
    "        \"\"\"Map a POS tag (presumably the LTP tagset — verify) to a coarse category.\"\"\"\n",
    "        if pos in {\"n\", \"nd\"}:\n",
    "            return \"NOUN\"\n",
    "        if pos in {\"nh\", \"ni\", \"nl\", \"ns\", \"nt\", \"nz\"}:\n",
    "            return \"NOUN-NE\"\n",
    "        if pos in {\"v\"}:\n",
    "            return \"VERB\"\n",
    "        if pos in {\"a\", \"b\"}:\n",
    "            return \"ADJ\"\n",
    "        if pos in {\"c\"}:\n",
    "            return \"CONJ\"\n",
    "        if pos in {\"r\"}:\n",
    "            return \"PRON\"\n",
    "        if pos in {\"d\"}:\n",
    "            return \"ADV\"\n",
    "        if pos in {\"u\"}:\n",
    "            return \"AUX\"\n",
    "        # if pos in {\"k\"}:  # TODO suffix words are too rare; lumped into OTHER for now\n",
    "        #     return \"SUFFIX\"\n",
    "        if pos in {\"m\"}:\n",
    "            return \"NUM\"\n",
    "        if pos in {\"p\"}:\n",
    "            return \"PREP\"\n",
    "        if pos in {\"q\"}:\n",
    "            return \"QUAN\"\n",
    "        if pos in {\"wp\"}:\n",
    "            return \"PUNCT\"\n",
    "        return \"OTHER\"\n",
    "\n",
    "    def __call__(self,\n",
    "                 src,\n",
    "                 tgt,\n",
    "                 edits,\n",
    "                 verbose: bool = False):\n",
    "        \"\"\"\n",
    "        Assign an error type to every edit.\n",
    "        :param src: source (incorrect) sentence as (token, pos) pairs\n",
    "        :param tgt: target (correct) sentence as (token, pos) pairs\n",
    "        :param edits: edits as (type, src_start, src_end, tgt_start, tgt_end)\n",
    "        :param verbose: whether to print the classified corrections\n",
    "        :return: the edits converted to Correction tuples with error types\n",
    "        \"\"\"\n",
    "        results = []\n",
    "        src_tokens = [x[0] for x in src]\n",
    "        tgt_tokens = [x[0] for x in tgt]\n",
    "        for edit in edits:\n",
    "            error_type = edit[0]\n",
    "            src_span = \" \".join(src_tokens[edit[1]: edit[2]])\n",
    "            tgt_span = \" \".join(tgt_tokens[edit[3]: edit[4]])\n",
    "            cor = None\n",
    "            if error_type[0] == \"T\":  # \"T\" edits become word-order (W) corrections\n",
    "                cor = Correction(\"W\", tgt_span, (edit[1], edit[2]))\n",
    "            elif error_type[0] == \"D\":  # \"D\" edits become removal (R) corrections\n",
    "                if self.granularity == \"word\":  # word level allows fine-grained types\n",
    "                    if edit[2] - edit[1] > 1:  # multi-word redundancy is OTHER for now\n",
    "                        cor = Correction(\"R:OTHER\", \"-NONE-\", (edit[1], edit[2]))\n",
    "                    else:\n",
    "                        pos = self.get_pos_type(src[edit[1]][1])\n",
    "                        pos = \"NOUN\" if pos == \"NOUN-NE\" else pos\n",
    "                        pos = \"MC\" if tgt_span == \"[缺失成分]\" else pos  # MC = missing component marker\n",
    "                        cor = Correction(\"R:{:s}\".format(pos), \"-NONE-\", (edit[1], edit[2]))\n",
    "                else:  # char level only needs the operation type\n",
    "                    cor = Correction(\"R\", \"-NONE-\", (edit[1], edit[2]))\n",
    "            elif error_type[0] == \"I\":  # \"I\" edits become missing-word (M) corrections\n",
    "                if self.granularity == \"word\":  # word level allows fine-grained types\n",
    "                    if edit[4] - edit[3] > 1:  # multi-word omission is OTHER for now\n",
    "                        cor = Correction(\"M:OTHER\", tgt_span, (edit[1], edit[2]))\n",
    "                    else:\n",
    "                        pos = self.get_pos_type(tgt[edit[3]][1])\n",
    "                        pos = \"NOUN\" if pos == \"NOUN-NE\" else pos\n",
    "                        pos = \"MC\" if tgt_span == \"[缺失成分]\" else pos  # MC = missing component marker\n",
    "                        cor = Correction(\"M:{:s}\".format(pos), tgt_span, (edit[1], edit[2]))\n",
    "                else:  # char level only needs the operation type\n",
    "                    cor = Correction(\"M\", tgt_span, (edit[1], edit[2]))\n",
    "            elif error_type[0] == \"S\":  # \"S\" edits become substitution (S) corrections\n",
    "                if self.granularity == \"word\":  # word level allows fine-grained types\n",
    "                    if check_spell_error(src_span.replace(\" \", \"\"), tgt_span.replace(\" \", \"\")):\n",
    "                        cor = Correction(\"S:SPELL\", tgt_span, (edit[1], edit[2]))\n",
    "                        # TODO: named-entity spelling errors are not distinguished yet\n",
    "                        # if edit[4] - edit[3] > 1:\n",
    "                        #     cor = Correction(\"S:SPELL:COMMON\", tgt_span, (edit[1], edit[2]))\n",
    "                        # else:\n",
    "                        #     pos = self.get_pos_type(tgt[edit[3]][1])\n",
    "                        #     if pos == \"NOUN-NE\":  # named-entity misspelling\n",
    "                        #         cor = Correction(\"S:SPELL:NE\", tgt_span, (edit[1], edit[2]))\n",
    "                        #     else:  # common-word misspelling\n",
    "                        #         cor = Correction(\"S:SPELL:COMMON\", tgt_span, (edit[1], edit[2]))\n",
    "                    else:\n",
    "                        if edit[4] - edit[3] > 1:  # multi-word substitution is OTHER for now\n",
    "                            cor = Correction(\"S:OTHER\", tgt_span, (edit[1], edit[2]))\n",
    "                        else:\n",
    "                            pos = self.get_pos_type(tgt[edit[3]][1])\n",
    "                            pos = \"NOUN\" if pos == \"NOUN-NE\" else pos\n",
    "                            pos = \"MC\" if tgt_span == \"[缺失成分]\" else pos  # MC = missing component marker\n",
    "                            cor = Correction(\"S:{:s}\".format(pos), tgt_span, (edit[1], edit[2]))\n",
    "                else:  # char level only needs the operation type\n",
    "                    cor = Correction(\"S\", tgt_span, (edit[1], edit[2]))\n",
    "            results.append(cor)\n",
    "        if verbose:\n",
    "            print(\"========== Corrections ==========\")\n",
    "            for cor in results:\n",
    "                print(\"Type: {:s}, Position: {:d} -> {:d}, Target: {:s}\".format(cor.op, cor.inds[0], cor.inds[1], cor.toks))\n",
    "        return results\n",
    "\n",
    "# print(pinyin(\"朝\", style=Style.NORMAL))"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
