{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Format the data file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "ename": "",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31mFailed to start the Kernel 'science39 (Python 3.9.18)'. \n",
      "\u001b[1;31mView Jupyter <a href='command:jupyter.viewOutput'>log</a> for further details. Too many open file descriptors"
     ]
    }
   ],
   "source": [
    "# Load data file \n",
    "import csv  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def check_max_field_count(csv_path: str) -> int:\n",
    "    # 确定最大的字段数量  \n",
    "    max_fields = 0  \n",
    "    with open(csv_path, 'r', newline='') as csvfile:  \n",
    "        csvreader = csv.reader(csvfile)  \n",
    "        for row in csvreader:  \n",
    "            max_fields = max(max_fields, len(row))  \n",
    "    return max_fields\n",
    "def fix_csv_field_count(origianl_csv_path: str,formated_csv_path: str) -> None:\n",
    "    max_fields = check_max_field_count(origianl_csv_path)\n",
    "    # 填充缺失的字段，使每一行都有相同的字段数量  \n",
    "    with open(formated_csv_path, 'w', newline='') as csvfile_out:  \n",
    "        csvwriter = csv.writer(csvfile_out)  \n",
    "        with open(origianl_csv_path, 'r', newline='') as csvfile_in:  \n",
    "            csvreader = csv.reader(csvfile_in)  \n",
    "            for row in csvreader:  \n",
    "                while len(row) < max_fields:  \n",
    "                    row.append('')  # 添加空字符串作为缺失字段的占位符  \n",
    "                csvwriter.writerow(row)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# constants\n",
    "ORIGINAL_MN_CSV_PATH = 'corpus-MN(67288).csv'\n",
    "FORMATED_MN_CSV_PATH = 'corpus-MN(67288).formated.csv'\n",
    "ORIGINAL_CH_CSV_PATH = 'corpus-CH(67288).csv'\n",
    "FORMATED_CH_CSV_PATH = 'corpus-CH(67288).formated.csv'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "fix_csv_field_count(ORIGINAL_MN_CSV_PATH,FORMATED_MN_CSV_PATH)\n",
    "fix_csv_field_count(ORIGINAL_CH_CSV_PATH,FORMATED_CH_CSV_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "check_max_field_count(FORMATED_MN_CSV_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "check_max_field_count(FORMATED_CH_CSV_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load data file\n",
    "import pandas as pd  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "字段数量: 1\n"
     ]
    }
   ],
   "source": [
    "def check_scv_field_count(csv_path: str) -> None:\n",
    "    # 读取CSV文件的第一行（头部）  \n",
    "    header = pd.read_csv(csv_path, nrows=0).columns.tolist()  \n",
    "    # 计算字段数量  \n",
    "    field_count = len(header)  \n",
    "    print(\"字段数量:\", field_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "字段数量: 3\n",
      "字段数量: 6\n"
     ]
    }
   ],
   "source": [
    "check_scv_field_count(FORMATED_MN_CSV_PATH)\n",
    "check_scv_field_count(FORMATED_CH_CSV_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "mongolian = pd.read_csv(FORMATED_MN_CSV_PATH,on_bad_lines='warn')  \n",
    "chinese = pd.read_csv(FORMATED_CH_CSV_PATH,on_bad_lines='warn')  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ᠪᠢᠳᠡ ᠤᠷᠲᠤ ᠴᠠᠭ ᠬᠡᠷᠡᠭ᠍ᠯᠡᠵᠦ ᠪᠠᠢ᠌ᠵᠤ ᠡᠨᠡ ᠲᠣᠯᠢ ᠶᠢ ᠨᠠᠢ᠌ᠷᠠᠭᠤᠯᠵᠤ ᠳᠠᠭᠤᠰᠬᠠᠨ᠎ᠠ ᠃</th>\n",
       "      <th>Unnamed: 1</th>\n",
       "      <th>Unnamed: 2</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>67287</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>66764</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>ᠵᠠᠷᠢᠮ ᠨᠢ ᠄</td>\n",
       "      <td>Boise</td>\n",
       "      <td>Idaho  ᠰᠢᠤᠳᠠᠨ  ᠨᠣᠮᠧᠷ  83232  ᠪᠣᠯᠣᠨ᠎ᠠ᠃</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>9</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       ᠪᠢᠳᠡ ᠤᠷᠲᠤ ᠴᠠᠭ ᠬᠡᠷᠡᠭ᠍ᠯᠡᠵᠦ ᠪᠠᠢ᠌ᠵᠤ ᠡᠨᠡ ᠲᠣᠯᠢ ᠶᠢ ᠨᠠᠢ᠌ᠷᠠᠭᠤᠯᠵᠤ ᠳᠠᠭᠤᠰᠬᠠᠨ᠎ᠠ ᠃  \\\n",
       "count                                               67287                     \n",
       "unique                                              66764                     \n",
       "top                                           ᠵᠠᠷᠢᠮ ᠨᠢ ᠄                      \n",
       "freq                                                    9                     \n",
       "\n",
       "       Unnamed: 1                             Unnamed: 2  \n",
       "count           1                                      1  \n",
       "unique          1                                      1  \n",
       "top         Boise  Idaho  ᠰᠢᠤᠳᠠᠨ  ᠨᠣᠮᠧᠷ  83232  ᠪᠣᠯᠣᠨ᠎ᠠ᠃  \n",
       "freq            1                                      1  "
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mongolian.describe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>我们需要很长的时间才能把这部词典编译完</th>\n",
       "      <th>Unnamed: 1</th>\n",
       "      <th>Unnamed: 2</th>\n",
       "      <th>Unnamed: 3</th>\n",
       "      <th>Unnamed: 4</th>\n",
       "      <th>Unnamed: 5</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>67287</td>\n",
       "      <td>1602</td>\n",
       "      <td>123</td>\n",
       "      <td>14</td>\n",
       "      <td>6</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>66680</td>\n",
       "      <td>1589</td>\n",
       "      <td>122</td>\n",
       "      <td>14</td>\n",
       "      <td>6</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>什么？</td>\n",
       "      <td>000美元。</td>\n",
       "      <td>CI BEGEJING-DU 0CIBAL BEY_E-BEN HALHALAHV-DV ...</td>\n",
       "      <td>Idaho，邮递区号是83232。</td>\n",
       "      <td>然後做了一件新的羊毛斗篷</td>\n",
       "      <td>都在车站接我</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>20</td>\n",
       "      <td>9</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       我们需要很长的时间才能把这部词典编译完 Unnamed: 1  \\\n",
       "count                67287       1602   \n",
       "unique               66680       1589   \n",
       "top                    什么？     000美元。   \n",
       "freq                    20          9   \n",
       "\n",
       "                                               Unnamed: 2         Unnamed: 3  \\\n",
       "count                                                 123                 14   \n",
       "unique                                                122                 14   \n",
       "top      CI BEGEJING-DU 0CIBAL BEY_E-BEN HALHALAHV-DV ...  Idaho，邮递区号是83232。   \n",
       "freq                                                    2                  1   \n",
       "\n",
       "          Unnamed: 4 Unnamed: 5  \n",
       "count              6          1  \n",
       "unique             6          1  \n",
       "top     然後做了一件新的羊毛斗篷     都在车站接我  \n",
       "freq               1          1  "
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "chinese.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "注意：两个文件的样本数量是对齐的（均为 67287 行），符合平行语料的要求。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Merge data "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 从每个DataFrame中选取第一列\n",
    "mongolian_field = mongolian.iloc[:, 0]  # 取第一列\n",
    "chinese_field = chinese.iloc[:, 0]    # 取第一列\n",
    "\n",
    "parallel_mon_ch = pd.DataFrame({\n",
    "    'mn': mongolian_field,\n",
    "    'ch': chinese_field\n",
    "})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>mn</th>\n",
       "      <th>ch</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>67287</td>\n",
       "      <td>67287</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>66764</td>\n",
       "      <td>66680</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>ᠵᠠᠷᠢᠮ ᠨᠢ ᠄</td>\n",
       "      <td>什么？</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>9</td>\n",
       "      <td>20</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                 mn     ch\n",
       "count         67287  67287\n",
       "unique        66764  66680\n",
       "top     ᠵᠠᠷᠢᠮ ᠨᠢ ᠄     什么？\n",
       "freq              9     20"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 显示新的DataFrame\n",
    "parallel_mon_ch.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Data input pipeline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>mn</th>\n",
       "      <th>ch</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>ᠠᠩᠬᠠᠨ ᠤ᠋ ᠡᠬᠢ ᠱᠢᠷᠬᠡᠭ</td>\n",
       "      <td>原始原纤维</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>ᠨᠢᠰᠭᠡᠯ ᠦ᠋ᠨ ᠪᠠᠭᠤᠳᠠᠯ ᠤ᠋ᠨ ᠲᠣᠬᠢᠷᠠᠭᠤᠯᠤᠯᠲᠠ ᠶ᠋ᠢᠨ ᠲᠠᠰᠤᠭ</td>\n",
       "      <td>机场调度室</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>ᠬᠡᠯᠪᠡᠷᠢ ᠶ᠋ᠢᠨ ᠰᠡᠳᠭᠢᠨ ᠪᠣᠳᠣᠬᠤ ᠱᠠᠲᠤ</td>\n",
       "      <td>形式运思阶段</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>ᠯᠠᠪᠠᠢ ᠶᠢᠨ ᠰᠦᠮ᠎ᠡ</td>\n",
       "      <td>喇拜音苏莫</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>ᠪᠢᠴᠢᠭ ᠦ᠋ᠨ ᠬᠡᠯᠡᠨ ᠦ᠌ ᠬᠡᠯᠡᠨ ᠵᠦᠢ</td>\n",
       "      <td>书面语语法</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                mn      ch\n",
       "0                              ᠠᠩᠬᠠᠨ ᠤ᠋ ᠡᠬᠢ ᠱᠢᠷᠬᠡᠭ   原始原纤维\n",
       "1  ᠨᠢᠰᠭᠡᠯ ᠦ᠋ᠨ ᠪᠠᠭᠤᠳᠠᠯ ᠤ᠋ᠨ ᠲᠣᠬᠢᠷᠠᠭᠤᠯᠤᠯᠲᠠ ᠶ᠋ᠢᠨ ᠲᠠᠰᠤᠭ   机场调度室\n",
       "2                  ᠬᠡᠯᠪᠡᠷᠢ ᠶ᠋ᠢᠨ ᠰᠡᠳᠭᠢᠨ ᠪᠣᠳᠣᠬᠤ ᠱᠠᠲᠤ  形式运思阶段\n",
       "3                                  ᠯᠠᠪᠠᠢ ᠶᠢᠨ ᠰᠦᠮ᠎ᠡ   喇拜音苏莫\n",
       "4                     ᠪᠢᠴᠢᠭ ᠦ᠋ᠨ ᠬᠡᠯᠡᠨ ᠦ᠌ ᠬᠡᠯᠡᠨ ᠵᠦᠢ   书面语语法"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# If the text contains unbalanced English double quotes, read_csv can silently\n",
     "# merge rows or abort with an error. Disable quote handling by passing\n",
     "# quoting=3 (csv.QUOTE_NONE; import csv first if using the named constant).\n",
     "data_dir = '/home/applsci-1927936-main/Experiment scripts and related files/Models/' \n",
     "\n",
     "# The corpus files are tab-separated: source language \\t target language,\n",
     "# i.e. Mongolian \\t Chinese for this task.\n",
     "# NOTE(review): the files are named eng-fra.* but presumably contain the\n",
     "# Mongolian/Chinese corpus -- confirm the naming.\n",
     "# TODO: the data contains malformed lines; they are skipped (on_bad_lines='skip').\n",
     "\n",
     "testTrainingFileName = '/root/MyCode/Mugulian_Sentiment_Analysis/model/Text/Mugulian_Chinese_Translation/Experiment scripts and related files/Models/eng-fra.txt'\n",
     "fullTrainingFileName = '/root/MyCode/Mugulian_Sentiment_Analysis/model/Text/Mugulian_Chinese_Translation/Experiment scripts and related files/Models/eng-fra.full.txt'\n",
     "data_df = pd.read_csv(fullTrainingFileName, encoding='UTF-8', sep='\\t', header=None, quoting=3,\n",
     "                      names=['mn', 'ch'], index_col=False, on_bad_lines='skip')\n",
     "\n",
     "# print(data_df.shape)\n",
     "# print(data_df.values.shape)\n",
     "# print(data_df.values[0])\n",
     "# print(data_df.values[0].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>mn</th>\n",
       "      <th>ch</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>1259914</td>\n",
       "      <td>1259914</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>unique</th>\n",
       "      <td>1154586</td>\n",
       "      <td>1086832</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>top</th>\n",
       "      <td>ᠡᠯ᠎ᠡ ᠠᠢᠮᠠᠭ ᠤ᠋ᠨ ᠵᠠᠬᠢᠷᠭ᠎ᠠ ᠂ ᠬᠣᠲᠠ ᠶ᠋ᠢᠨ ᠠᠷᠠᠳ ᠤ᠋ᠨ ᠵ...</td>\n",
       "      <td>第一章  总  则</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>freq</th>\n",
       "      <td>78</td>\n",
       "      <td>78</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                       mn         ch\n",
       "count                                             1259914    1259914\n",
       "unique                                            1154586    1086832\n",
       "top     ᠡᠯ᠎ᠡ ᠠᠢᠮᠠᠭ ᠤ᠋ᠨ ᠵᠠᠬᠢᠷᠭ᠎ᠠ ᠂ ᠬᠣᠲᠠ ᠶ᠋ᠢᠨ ᠠᠷᠠᠳ ᠤ᠋ᠨ ᠵ...  第一章  总  则\n",
       "freq                                                   78         78"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data_df.head()\n",
    "data_df.describe()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 规范化字符串\n",
    "def normalizeString(s):\n",
    "    # print(s) # list  ['Go.']\n",
    "    # s = s[0]\n",
    "    s = s.lower().strip()\n",
    "    #s = unicodeToAscii(s)\n",
    "    #s = re.sub(r\"([.!?])\", r\" \\1\", s)  # \\1表示group(1)即第一个匹配到的 即匹配到'.'或者'!'或者'?'后，一律替换成'空格.'或者'空格!'或者'空格？'\n",
    "    #s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)  # 非字母以及非.!?的其他任何字符 一律被替换成空格\n",
    "    s = re.sub(r'[\\s]+', \" \", s)  # 将出现的多个空格，都使用一个空格代替。例如：w='abc  1   23  1' 处理后：w='abc 1 23 1'\n",
    "    return s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(normalizeString('Va !'))\n",
    "# print(normalizeString('Go.'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "pairs = [[normalizeString(s) for s in line] for line in parallel_mon_ch.values]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print('pairs num=', len(pairs))\n",
    "# print(pairs[0])\n",
    "# print(pairs[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# MAX_LENGTH = d_model//num_heads\n",
    "MAX_LENGTH = 100\n",
    "# 文件是英译法，我们实现的是法译英，所以进行了reverse，所以pair[1]是英语\n",
    "# 为了快速训练，仅保留“我是”“你是”“他是”等简单句子，并且删除原始文本长度大于10个标记的样本\n",
    "def filterPair(p):\n",
    "    return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH \n",
    "\n",
    "def filterPairs(pairs):\n",
    "    # 过滤，并交换句子顺序，得到法英句子对（之前是英法句子对）\n",
    "    return [[pair[1], pair[0]] for pair in pairs if filterPair(pair)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "pairs = filterPairs(pairs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "经过过滤后平行语料数目为： 67182\n",
      "['不接受他的建议将是件憾事。', 'ᠲᠡᠭᠦᠨ ᠦ ᠵᠥᠪᠯᠡᠯᠭᠡ ᠶᠢ ᠡᠰᠡ ᠬᠦᠯᠢᠶᠡᠨ ᠠᠪᠤᠪᠠᠯ ᠬᠠᠷᠠᠮᠰᠠᠯ']\n"
     ]
    }
   ],
   "source": [
    "print('经过过滤后平行语料数目为：', len(pairs))\n",
    "print(pairs[0])\n",
    "# print(random.choice(pairs))\n",
    "# print(np.array(pairs).shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Split the parallel corpus into train / validation / test sets.\n",
     "# The first split holds out 2% as validation; the second holds out 2% of the\n",
     "# remainder as test. random_state is fixed for reproducibility.\n",
     "# (Earlier runs tried test_size 0.0338/0.03485 and 0.021/0.021.)\n",
     "train_test, val_pairs = train_test_split(pairs, test_size=0.020, random_state=1234)\n",
     "train_pairs, test_pairs = train_test_split(train_test, test_size=0.020, random_state=1234)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集句子数目： 64521\n",
      "验证集句子数目： 1344\n",
      "测试集句子数目： 1317\n"
     ]
    }
   ],
   "source": [
    "print('训练集句子数目：', len(train_pairs))\n",
    "print('验证集句子数目：', len(val_pairs))\n",
    "print('测试集句子数目：', len(test_pairs))\n",
    "# print(test_pairs[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['不，我却认为不应当这样提。', 'ᠪᠣᠯᠬᠤ ᠦᠭᠡᠢ ᠂ ᠪᠢᠳᠡ ᠣᠳᠣ ᠢᠩᠭ᠍ᠢᠵᠦ ᠳᠤᠷᠠᠳᠴᠤ ᠪᠣᠯᠬᠤ ᠦᠭᠡᠢ ᠃'], ['当然可以，只要邮局不在意就行。', 'ᠪᠣᠯᠣᠯ ᠦᠭᠡᠢ ᠂ ᠰᠢᠤᠳᠠᠨ ᠤ ᠭᠠᠵᠠᠷ ᠯᠠ ᠵᠥᠪᠰᠢᠶᠡᠷᠡᠪᠡᠯ ᠪᠣᠯᠬᠤ ᠨᠢ ᠲᠡᠷᠡ ᠃'], ['他被指控犯了与外币有关的诈欺罪', 'ᠲᠡᠷᠡ ᠭᠠᠳᠠᠭᠠᠳᠤ ᠶᠢᠨ ᠵᠣᠭᠣᠰ ᠲᠠᠢ ᠬᠣᠯᠪᠣᠭᠳᠠᠭᠰᠠᠨ ᠮᠡᠬᠡᠯᠡᠬᠦ ᠶᠠᠯ\\u180eᠠ ᠬᠢᠪᠡ ᠭᠡᠵᠦ ᠵᠠᠯᠠᠯᠳᠤᠭᠳᠠᠵᠠᠢ ᠃']]\n",
      "<class 'list'>\n"
     ]
    }
   ],
   "source": [
    "print(test_pairs[:3])\n",
    "print(type(test_pairs))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  \n",
    "from torchtext.vocab import build_vocab_from_iterator  \n",
    "from torch.utils.data import DataLoader, Dataset  \n",
    "from typing import List, Tuple  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 假设tokenizer是已经定义好的分词函数  \n",
    "tokenizer = lambda x: x.split()  \n",
    "  \n",
    "# 预处理函数，添加开始和结束标记  \n",
    "def preprocess(text):  \n",
    "    tokenized_text = tokenizer(text)  \n",
    "    return ['<start>'] + tokenized_text + ['<end>']  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建词汇表的迭代器函数  \n",
    "def yield_tokens(data_iter):  \n",
    "    for text_entry in data_iter:  \n",
    "        yield preprocess(text_entry[0])  # 假设源语言和目标语言使用相同的词汇表  \n",
    "        yield preprocess(text_entry[1])  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def yield_src_tokens(train_pairs):  \n",
    "    for src, _ in train_pairs:  \n",
    "        yield src.split()  # 假设句子已经通过空格分词  \n",
    "  \n",
    "def yield_targ_tokens(train_pairs):  \n",
    "    for _, targ in train_pairs:  \n",
    "        yield targ.split()  # 假设句子已经通过空格分词  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "src_vocab = build_vocab_from_iterator(yield_src_tokens(train_pairs), specials=[\"<unk>\", \"<pad>\", \"<start>\", \"<end>\"])  \n",
    "targ_vocab = build_vocab_from_iterator(yield_targ_tokens(train_pairs), specials=[\"<unk>\", \"<pad>\", \"<start>\", \"<end>\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Custom Dataset wrapping (source, target) sentence pairs for DataLoader use.\n",
     "class TranslationDataset(Dataset):  \n",
     "    \"\"\"Map-style dataset converting sentence pairs into token-id tensors.\"\"\"\n",
     "    def __init__(self, pairs, src_vocab, targ_vocab):  \n",
     "        # pairs: sequence of (source_text, target_text); vocabs map token -> id.\n",
     "        self.pairs = pairs  \n",
     "        self.src_vocab = src_vocab  \n",
     "        self.targ_vocab = targ_vocab  \n",
     "  \n",
     "    def __len__(self):  \n",
     "        # Number of sentence pairs.\n",
     "        return len(self.pairs)  \n",
     "  \n",
     "    def __getitem__(self, index):  \n",
     "        \"\"\"Return (src_tensor, targ_tensor) of token ids incl. <start>/<end> markers.\"\"\"\n",
     "        src_text, targ_text = self.pairs[index]  \n",
     "        src_tensor = torch.tensor([self.src_vocab[tok] for tok in preprocess(src_text)], dtype=torch.long)  \n",
     "        targ_tensor = torch.tensor([self.targ_vocab[tok] for tok in preprocess(targ_text)], dtype=torch.long)  \n",
     "        return src_tensor, targ_tensor  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Number of GPUs used for training; effective batch size scales with it.\n",
     "ngpu = 1\n",
     "# Per-GPU batch size.\n",
     "batch = 5\n",
     "BATCH_SIZE = batch * ngpu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build a Dataset object for each split.\n",
     "train_dataset = TranslationDataset(train_pairs, src_vocab, targ_vocab)  \n",
     "val_dataset = TranslationDataset(val_pairs, src_vocab, targ_vocab)  \n",
     "test_dataset = TranslationDataset(test_pairs, src_vocab, targ_vocab)  \n",
     "  \n",
     "# Wrap the datasets in PyTorch DataLoaders; only training data is shuffled.\n",
     "train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)  \n",
     "val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)  \n",
     "test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 输入和输出的词汇表大小  \n",
    "input_vocab_size = len(src_vocab)  \n",
    "target_vocab_size = len(targ_vocab)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "64157\n",
      "38724\n"
     ]
    }
   ],
   "source": [
    "print(input_vocab_size)\n",
    "print(target_vocab_size)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "science39",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
