{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1.导入bert预训练模型 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import codecs\n",
    "import os\n",
    "import sys\n",
    " \n",
    "import numpy as np\n",
    "import yaml\n",
    "from keras.models import Sequential\n",
    "from keras import Input, Model, losses\n",
    "from keras.layers import Lambda, Dense, Bidirectional, Dropout, LSTM\n",
    "from keras.optimizers import Adam\n",
    "from keras.preprocessing import sequence\n",
    "from keras_bert import Tokenizer, load_trained_model_from_checkpoint\n",
    " \n",
    "# Locations of the pre-trained BERT model files.\n",
    "# NOTE(review): Windows-style relative paths — confirm they match the runtime OS.\n",
    "config_path = r'.\\bert_data\\bert_config.json'  # model configuration file\n",
    "checkpoint_path = r'.\\bert_data\\bert_model.ckpt'  # pre-trained weights checkpoint\n",
    "dict_path = r'.\\bert_data\\vocab.txt'  # vocabulary file used for tokenization\n",
    " \n",
    "maxlen=100  # maximum sentence length used for padding/truncation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2.预定义一些函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_token_dict(dict_path):\n",
    "    '''\n",
    "    :param: dict_path: 是bert模型的vocab.txt文件\n",
    "    :return:将文件中字进行编码\n",
    "    '''\n",
    "    # 将官方弄好的词汇总文件进行字典编码\n",
    "    token_dict = {}\n",
    "    with codecs.open(dict_path, 'r', 'utf-8') as reader:\n",
    "        for line in reader:\n",
    "            token = line.strip()\n",
    "            token_dict[token] = len(token_dict)\n",
    "    return token_dict\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3.数据预处理 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import re\n",
    "\n",
    "\n",
    "def get_data():\n",
    "    '''\n",
    "    Load the train/test CSV files and strip URLs from every text.\n",
    "\n",
    "    :return: x_train, y_train, x_test, id_test\n",
    "    '''\n",
    "    train_df = pd.read_csv(\"./data/train.csv\")\n",
    "    test_df = pd.read_csv(\"./data/test.csv\")\n",
    "\n",
    "    y_train = train_df[['target']].values\n",
    "    id_test = test_df[['id']].values\n",
    "\n",
    "    # Pattern matching http/https URLs (plus one optional trailing space).\n",
    "    url_pattern = r\"(http|https)://\\S* ?\"\n",
    "\n",
    "    # Remove URLs from the training texts.\n",
    "    x_train = [re.sub(url_pattern, \"\", sentence.strip())\n",
    "               for [sentence] in train_df[['text']].values]\n",
    "\n",
    "    # Remove URLs from the test texts.\n",
    "    x_test = [re.sub(url_pattern, \"\", sentence.strip())\n",
    "              for [sentence] in test_df[['text']].values]\n",
    "\n",
    "    return x_train, y_train, x_test, id_test\n",
    "\n",
    "# Encode raw texts into BERT input arrays.\n",
    "def get_encode(data, token_dict, max_len=100):\n",
    "    '''\n",
    "    :param data: list of strings [text1, text2, ...]\n",
    "    :param token_dict: token -> id mapping (see get_token_dict)\n",
    "    :param max_len: maximum sequence length used for padding/truncation\n",
    "    :return: [X1, X2], where X1 holds the padded token-id sequences and X2\n",
    "             the segment ids (first/second sentence position information)\n",
    "    '''\n",
    "    tokenizer = Tokenizer(token_dict)\n",
    "    X1 = []\n",
    "    X2 = []\n",
    "    for line in data:\n",
    "        # Encode a single sentence (no second segment).\n",
    "        x1, x2 = tokenizer.encode(first=line)\n",
    "        X1.append(x1)\n",
    "        X2.append(x2)\n",
    "    # Pad/truncate every sequence to exactly max_len entries (same as with\n",
    "    # word2vec pipelines, the sequences must be made uniform in length).\n",
    "    # BUG FIX: previously the global `maxlen` was used here, silently\n",
    "    # ignoring the `max_len` parameter.\n",
    "    X1 = sequence.pad_sequences(X1, maxlen=max_len, padding='post', truncating='post')\n",
    "    X2 = sequence.pad_sequences(X2, maxlen=max_len, padding='post', truncating='post')\n",
    "    return [X1, X2]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load and URL-clean the raw train/test texts.\n",
    "x_train, y_train, x_test, id_test = get_data()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[array([[  101, 14008, 18589, ...,     0,     0,     0],\n",
       "        [  101, 14958, 12319, ...,     0,     0,     0],\n",
       "        [  101, 10367, 20679, ...,     0,     0,     0],\n",
       "        ...,\n",
       "        [  101, 50957,   119, ...,     0,     0,     0],\n",
       "        [  101, 13202, 10104, ...,     0,     0,     0],\n",
       "        [  101, 10103, 40883, ...,     0,     0,     0]]),\n",
       " array([[0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0]])]"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the encoded training data (token-id and segment-id arrays).\n",
    "get_encode(x_train, get_token_dict(dict_path), max_len=maxlen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_bert_model(X1, X2):\n",
    "    '''\n",
    "    Run the pre-trained BERT model over encoded inputs and return the\n",
    "    resulting embedding matrix (note: this returns vectors, not a model).\n",
    "\n",
    "    :param X1: encoded token-id sequences\n",
    "    :param X2: encoded segment-id (position) sequences\n",
    "    :return: BERT output vectors for the given inputs\n",
    "    '''\n",
    "    # Load Google's pre-trained BERT weights (reloaded on every call).\n",
    "    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n",
    "    # Predict and return the embedding matrix directly.\n",
    "    return bert_model.predict([X1, X2])\n",
    "\n",
    "\n",
    "def build_model():\n",
    "    '''\n",
    "    Build the downstream classifier: BiLSTM -> Dropout -> sigmoid output.\n",
    "\n",
    "    :return: compiled Keras model for binary classification\n",
    "    '''\n",
    "    classifier = Sequential([\n",
    "        Bidirectional(LSTM(128)),\n",
    "        Dropout(0.5),\n",
    "        Dense(1, activation=\"sigmoid\"),\n",
    "    ])\n",
    "    classifier.compile(loss=\"binary_crossentropy\", optimizer=Adam(1e-5), metrics=['accuracy'])\n",
    "    return classifier\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare training inputs: load texts, encode them, then extract BERT embeddings.\n",
    "x_train, y_train, x_test, id_test = get_data()\n",
    "token_dict = get_token_dict(dict_path)\n",
    "[X1_train, X2_train] = get_encode(x_train, token_dict)\n",
    "\n",
    "# Fixed BERT feature vectors used as input to the downstream classifier.\n",
    "wordvec = build_bert_model(X1_train, X2_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 6090 samples, validate on 1523 samples\n",
      "Epoch 1/10\n",
      "6090/6090 [==============================] - 31s 5ms/step - loss: 0.6518 - accuracy: 0.6171 - val_loss: 0.6022 - val_accuracy: 0.7295\n",
      "Epoch 2/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.5809 - accuracy: 0.7286 - val_loss: 0.5309 - val_accuracy: 0.7656\n",
      "Epoch 3/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.5207 - accuracy: 0.7690 - val_loss: 0.4819 - val_accuracy: 0.7800\n",
      "Epoch 4/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4856 - accuracy: 0.7828 - val_loss: 0.4549 - val_accuracy: 0.7958\n",
      "Epoch 5/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4615 - accuracy: 0.7993 - val_loss: 0.4400 - val_accuracy: 0.8024\n",
      "Epoch 6/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4525 - accuracy: 0.8038 - val_loss: 0.4291 - val_accuracy: 0.8089\n",
      "Epoch 7/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4367 - accuracy: 0.8102 - val_loss: 0.4224 - val_accuracy: 0.8102\n",
      "Epoch 8/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4303 - accuracy: 0.8164 - val_loss: 0.4173 - val_accuracy: 0.8109\n",
      "Epoch 9/10\n",
      "6090/6090 [==============================] - 32s 5ms/step - loss: 0.4243 - accuracy: 0.8166 - val_loss: 0.4127 - val_accuracy: 0.8109\n",
      "Epoch 10/10\n",
      "6090/6090 [==============================] - 30s 5ms/step - loss: 0.4191 - accuracy: 0.8207 - val_loss: 0.4104 - val_accuracy: 0.8116\n"
     ]
    }
   ],
   "source": [
    "model = build_model()\n",
    "# Train on the BERT embeddings; hold out 20% of samples for validation.\n",
    "model.fit(wordvec, y_train, batch_size=32, epochs=10, validation_split=0.2)\n",
    "# Persist the architecture as YAML.\n",
    "# BUG FIX: model.to_yaml() already returns a YAML string; passing it through\n",
    "# yaml.dump() again produced a double-encoded document. Write it directly.\n",
    "yaml_string = model.to_yaml()\n",
    "with open('test_keras_bert.yml', 'w') as f:\n",
    "    f.write(yaml_string)\n",
    "# Persist the full model (architecture + weights).\n",
    "model.save('test_keras_bert.h5')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4.开始预测 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data preparation: encode the test texts and extract their BERT embeddings.\n",
    "[X1_test, X2_test] = get_encode(x_test, token_dict)\n",
    "wordvec_test = build_bert_model(X1_test, X2_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict a probability for each test sample.\n",
    "result = model.predict(wordvec_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Binarize the sigmoid outputs at the 0.5 threshold.\n",
    "final_result = [1 if score > 0.5 else 0 for score in result]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assemble the submission table (id, predicted target).\n",
    "submit_result = pd.DataFrame({'id': np.squeeze(id_test), 'target': final_result})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Write the submission file (CSV content despite the .txt extension).\n",
    "submit_result.to_csv('bert_result_1.txt', index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
