{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1.导入bert预训练模型 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import codecs\n",
    "import os\n",
    "import sys\n",
    "\n",
    "import gc\n",
    "import random\n",
    "import numpy as np\n",
    "import yaml\n",
    "import pickle\n",
    "from keras.models import Sequential\n",
    "from keras import Input, Model, losses\n",
    "from keras.layers import Lambda, Dense, Bidirectional, Dropout, LSTM\n",
    "from keras.optimizers import Adam\n",
    "from keras.preprocessing import sequence\n",
    "from keras_bert import Tokenizer, load_trained_model_from_checkpoint\n",
    "from nltk.tokenize import word_tokenize\n",
    "from nltk.stem.wordnet import WordNetLemmatizer\n",
    "\n",
    "# NOTE: paths to the pre-trained BERT files, relative to the notebook's\n",
    "# working directory. Forward slashes work on Windows, Linux and macOS.\n",
    "config_path = './bert_data/bert_config.json'  # model config file\n",
    "checkpoint_path = './bert_data/bert_model.ckpt'\n",
    "dict_path = './bert_data/vocab.txt'\n",
    " \n",
    "maxlen = 100  # maximum sentence length, used when padding"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2.预定义一些函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_token_dict(dict_path):\n",
    "    '''\n",
    "    :param: dict_path: 是bert模型的vocab.txt文件\n",
    "    :return:将文件中字进行编码\n",
    "    '''\n",
    "    # 将官方弄好的词汇总文件进行字典编码\n",
    "    token_dict = {}\n",
    "    with codecs.open(dict_path, 'r', 'utf-8') as reader:\n",
    "        for line in reader:\n",
    "            token = line.strip()\n",
    "            token_dict[token] = len(token_dict)\n",
    "    return token_dict\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3.数据预处理 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import re"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.1 初级不好使版本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from nltk.corpus import stopwords\n",
    "\n",
    "custom_stopwords = []\n",
    "stopword_set = stopwords.words(\"english\") + custom_stopwords + [\"url\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode texts into token-id / segment-id sequences\n",
    "def get_encode(data, token_dict, max_len=100):\n",
    "    '''\n",
    "    :param data:字符串数组[text1, text2, text3......]\n",
    "    :param token_dict:编码字典\n",
    "    :param max_len:最大字符长度\n",
    "    :return:[X1,X2]，其中X1是经过编码后的集合，X2表示第一句和第二句的位置，记录的是位置信息\n",
    "    '''\n",
    "\n",
    "    tokenizer = Tokenizer(token_dict)\n",
    "    X1 = []\n",
    "    X2 = []\n",
    "    for line in data:\n",
    "\n",
    "        # 将字符按照\n",
    "        x1, x2 = tokenizer.encode(first=line)\n",
    "        X1.append(x1)\n",
    "        X2.append(x2)\n",
    "    # 利用Keras API进行对数据集  补齐  操作。\n",
    "    # 与word2vec没什么区别，都需要进行补齐\n",
    "    X1 = sequence.pad_sequences(X1, maxlen=max_len, padding='post', truncating='post')\n",
    "    X2 = sequence.pad_sequences(X2, maxlen=max_len, padding='post', truncating='post')\n",
    "    return [X1, X2]\n",
    "\n",
    "def replace_contractions(tweet):\n",
    "    tweet = re.sub(r\"he's\", \"he is\", tweet)\n",
    "    tweet = re.sub(r\"there's\", \"there is\", tweet)\n",
    "    tweet = re.sub(r\"We're\", \"We are\", tweet)\n",
    "    tweet = re.sub(r\"That's\", \"That is\", tweet)\n",
    "    tweet = re.sub(r\"won't\", \"will not\", tweet)\n",
    "    tweet = re.sub(r\"they're\", \"they are\", tweet)\n",
    "    tweet = re.sub(r\"Can't\", \"Cannot\", tweet)\n",
    "    tweet = re.sub(r\"wasn't\", \"was not\", tweet)\n",
    "    tweet = re.sub(r\"don\\x89Ûªt\", \"do not\", tweet)\n",
    "    tweet = re.sub(r\"aren't\", \"are not\", tweet)\n",
    "    tweet = re.sub(r\"isn't\", \"is not\", tweet)\n",
    "    tweet = re.sub(r\"What's\", \"What is\", tweet)\n",
    "    tweet = re.sub(r\"haven't\", \"have not\", tweet)\n",
    "    tweet = re.sub(r\"hasn't\", \"has not\", tweet)\n",
    "    tweet = re.sub(r\"There's\", \"There is\", tweet)\n",
    "    tweet = re.sub(r\"He's\", \"He is\", tweet)\n",
    "    tweet = re.sub(r\"It's\", \"It is\", tweet)\n",
    "    tweet = re.sub(r\"You're\", \"You are\", tweet)\n",
    "    tweet = re.sub(r\"I'M\", \"I am\", tweet)\n",
    "    tweet = re.sub(r\"shouldn't\", \"should not\", tweet)\n",
    "    tweet = re.sub(r\"wouldn't\", \"would not\", tweet)\n",
    "    tweet = re.sub(r\"i'm\", \"I am\", tweet)\n",
    "    tweet = re.sub(r\"I\\x89Ûªm\", \"I am\", tweet)\n",
    "    tweet = re.sub(r\"I'm\", \"I am\", tweet)\n",
    "    tweet = re.sub(r\"Isn't\", \"Is not\", tweet)\n",
    "    tweet = re.sub(r\"Here's\", \"Here is\", tweet)\n",
    "    tweet = re.sub(r\"you've\", \"you have\", tweet)\n",
    "    tweet = re.sub(r\"you\\x89Ûªve\", \"you have\", tweet)\n",
    "    tweet = re.sub(r\"we're\", \"we are\", tweet)\n",
    "    tweet = re.sub(r\"what's\", \"what is\", tweet)\n",
    "    tweet = re.sub(r\"couldn't\", \"could not\", tweet)\n",
    "    tweet = re.sub(r\"we've\", \"we have\", tweet)\n",
    "    tweet = re.sub(r\"it\\x89Ûªs\", \"it is\", tweet)\n",
    "    tweet = re.sub(r\"doesn\\x89Ûªt\", \"does not\", tweet)\n",
    "    tweet = re.sub(r\"It\\x89Ûªs\", \"It is\", tweet)\n",
    "    tweet = re.sub(r\"Here\\x89Ûªs\", \"Here is\", tweet)\n",
    "    tweet = re.sub(r\"who's\", \"who is\", tweet)\n",
    "    tweet = re.sub(r\"I\\x89Ûªve\", \"I have\", tweet)\n",
    "    tweet = re.sub(r\"y'all\", \"you all\", tweet)\n",
    "    tweet = re.sub(r\"can\\x89Ûªt\", \"cannot\", tweet)\n",
    "    tweet = re.sub(r\"would've\", \"would have\", tweet)\n",
    "    tweet = re.sub(r\"it'll\", \"it will\", tweet)\n",
    "    tweet = re.sub(r\"we'll\", \"we will\", tweet)\n",
    "    tweet = re.sub(r\"wouldn\\x89Ûªt\", \"would not\", tweet)\n",
    "    tweet = re.sub(r\"We've\", \"We have\", tweet)\n",
    "    tweet = re.sub(r\"he'll\", \"he will\", tweet)\n",
    "    tweet = re.sub(r\"Y'all\", \"You all\", tweet)\n",
    "    tweet = re.sub(r\"Weren't\", \"Were not\", tweet)\n",
    "    tweet = re.sub(r\"Didn't\", \"Did not\", tweet)\n",
    "    tweet = re.sub(r\"they'll\", \"they will\", tweet)\n",
    "    tweet = re.sub(r\"they'd\", \"they would\", tweet)\n",
    "    tweet = re.sub(r\"DON'T\", \"DO NOT\", tweet)\n",
    "    tweet = re.sub(r\"That\\x89Ûªs\", \"That is\", tweet)\n",
    "    tweet = re.sub(r\"they've\", \"they have\", tweet)\n",
    "    tweet = re.sub(r\"i'd\", \"I would\", tweet)\n",
    "    tweet = re.sub(r\"should've\", \"should have\", tweet)\n",
    "    tweet = re.sub(r\"You\\x89Ûªre\", \"You are\", tweet)\n",
    "    tweet = re.sub(r\"where's\", \"where is\", tweet)\n",
    "    tweet = re.sub(r\"Don\\x89Ûªt\", \"Do not\", tweet)\n",
    "    tweet = re.sub(r\"we'd\", \"we would\", tweet)\n",
    "    tweet = re.sub(r\"i'll\", \"I will\", tweet)\n",
    "    tweet = re.sub(r\"weren't\", \"were not\", tweet)\n",
    "    tweet = re.sub(r\"They're\", \"They are\", tweet)\n",
    "    tweet = re.sub(r\"Can\\x89Ûªt\", \"Cannot\", tweet)\n",
    "    tweet = re.sub(r\"you\\x89Ûªll\", \"you will\", tweet)\n",
    "    tweet = re.sub(r\"I\\x89Ûªd\", \"I would\", tweet)\n",
    "    tweet = re.sub(r\"let's\", \"let us\", tweet)\n",
    "    tweet = re.sub(r\"it's\", \"it is\", tweet)\n",
    "    tweet = re.sub(r\"can't\", \"cannot\", tweet)\n",
    "    tweet = re.sub(r\"don't\", \"do not\", tweet)\n",
    "    tweet = re.sub(r\"you're\", \"you are\", tweet)\n",
    "    tweet = re.sub(r\"i've\", \"I have\", tweet)\n",
    "    tweet = re.sub(r\"that's\", \"that is\", tweet)\n",
    "    tweet = re.sub(r\"i'll\", \"I will\", tweet)\n",
    "    tweet = re.sub(r\"doesn't\", \"does not\", tweet)\n",
    "    tweet = re.sub(r\"i'd\", \"I would\", tweet)\n",
    "    tweet = re.sub(r\"didn't\", \"did not\", tweet)\n",
    "    tweet = re.sub(r\"ain't\", \"am not\", tweet)\n",
    "    tweet = re.sub(r\"you'll\", \"you will\", tweet)\n",
    "    tweet = re.sub(r\"I've\", \"I have\", tweet)\n",
    "    tweet = re.sub(r\"Don't\", \"do not\", tweet)\n",
    "    tweet = re.sub(r\"I'll\", \"I will\", tweet)\n",
    "    tweet = re.sub(r\"I'd\", \"I would\", tweet)\n",
    "    tweet = re.sub(r\"Let's\", \"Let us\", tweet)\n",
    "    tweet = re.sub(r\"you'd\", \"You would\", tweet)\n",
    "    tweet = re.sub(r\"It's\", \"It is\", tweet)\n",
    "    tweet = re.sub(r\"Ain't\", \"am not\", tweet)\n",
    "    tweet = re.sub(r\"Haven't\", \"Have not\", tweet)\n",
    "    tweet = re.sub(r\"Could've\", \"Could have\", tweet)\n",
    "    tweet = re.sub(r\"youve\", \"you have\", tweet)  \n",
    "    tweet = re.sub(r\"donå«t\", \"do not\", tweet) \n",
    "    return tweet\n",
    "\n",
    "def remove_emoji(text):\n",
    "    emoji_pattern = re.compile(\"[\"\n",
    "                           u\"\\U0001F600-\\U0001F64F\"  # emoticons\n",
    "                           u\"\\U0001F300-\\U0001F5FF\"  # symbols & pictographs\n",
    "                           u\"\\U0001F680-\\U0001F6FF\"  # transport & map symbols\n",
    "                           u\"\\U0001F1E0-\\U0001F1FF\"  # flags (iOS)\n",
    "                           u\"\\U00002702-\\U000027B0\"\n",
    "                           u\"\\U000024C2-\\U0001F251\"\n",
    "                           \"]+\", flags=re.UNICODE)\n",
    "    return emoji_pattern.sub(r'', text)\n",
    "\n",
    "def preProcess(iter):\n",
    "    #https://www.kaggle.com/shahules/basic-eda-cleaning-and-glove\n",
    "    # remove extra space\n",
    "    regex_ws=re.compile(r\"\\s+\")\n",
    "#     print(\"before:\", iter)\n",
    "    ret=regex_ws.sub(\" \",iter)\n",
    "#     print(\"after:\", ret)\n",
    "    ret=ret.replace(\"&amp;\",\"&\").replace(\"&lt;\",\"<\").replace(\"&gt;\",\">\")\n",
    "\n",
    "\n",
    "    #Replace slang words\n",
    "    #for key in abbreviations.keys():\n",
    "    #    ret=ret.replace(key,abbreviations[key])\n",
    "\n",
    "    #Replace URL\n",
    "    regexp=\"(https?:\\/\\/(?:www\\.|(?!www)|(?:xmlns\\.))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})\"\n",
    "    ret=re.sub(regexp,\"url\",ret)\n",
    "    #replace @addresses\n",
    "    regexp='@[A-Za-z0-9_]+'\n",
    "    ret=re.sub(regexp,\"@twitterhandle\",ret)\n",
    "\n",
    "    ret=remove_emoji(ret)\n",
    "\n",
    "    ret=replace_contractions(ret)\n",
    "    #Split on punctuations\n",
    "#     ret1=re.split(\"[,_, \\<>!\\?\\.:\\n\\\"=*/]+\",ret)\n",
    "    \n",
    "    #Remove Stopwords\n",
    "#     ret2=[word for word in ret1 if word not in stopword_set]\n",
    "#     ret2=\" \".join(ret2)\n",
    "    #Remove  numbers\n",
    "#     ret2=re.sub(r\"(\\s\\d+)\",\" \",ret2)\n",
    "\n",
    "    #STEM TEXT\n",
    "    #ret3=stem_text(strip_punctuation(ret2))\n",
    "\n",
    "    return ret\n",
    "\n",
    "def get_data(is_shuffle=False):\n",
    "    '''\n",
    "    读取数据的函数\n",
    "    :return: x_train, y_train, x_test, id_test\n",
    "    '''\n",
    "    train_df = pd.read_csv(\"./data/train.csv\")\n",
    "    test_df = pd.read_csv(\"./data/test.csv\")\n",
    "\n",
    "    y_train = train_df[['target']].values\n",
    "    raw_x_train = train_df[['text']].values\n",
    "    x_train = []\n",
    "    \n",
    "    raw_x_test = test_df[['text']].values\n",
    "    x_test = []\n",
    "    id_test = test_df[['id']].values\n",
    "#     print(raw_x_train)\n",
    "    \n",
    "    # 将训练文本中的网址给去除\n",
    "    for [sentence] in raw_x_train:\n",
    "        sentence = preProcess(sentence)\n",
    "        x_train.append(sentence)\n",
    "\n",
    "    # 将测试文本中的网址给去除\n",
    "    for [sentence] in raw_x_test:\n",
    "        sentence = preProcess(sentence)\n",
    "        x_test.append(sentence)\n",
    "    \n",
    "    x_train = np.array(x_train)\n",
    "    y_train = np.array(y_train)\n",
    "    \n",
    "    x_test = np.array(x_test)\n",
    "    id_test = np.array(id_test)\n",
    "    \n",
    "    if is_shuffle:\n",
    "        x_train_index = [i for i in range(len(x_train))]\n",
    "        random.shuffle(x_train_index)\n",
    "        x_train = x_train[x_train_index]\n",
    "        y_train = y_train[x_train_index]\n",
    "    \n",
    "    gc.collect()\n",
    "    \n",
    "    return x_train, y_train, x_test, id_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'I will help you'"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "preProcess(\"I'll help you\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.2 super版本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from spellchecker import SpellChecker\n",
    "# https://stackoverflow.com/a/34682849\n",
    "def untokenize(words):\n",
    "    \"\"\"Untokenizing a text undoes the tokenizing operation, restoring\n",
    "    punctuation and spaces to the places that people expect them to be.\n",
    "    Ideally, `untokenize(tokenize(text))` should be identical to `text`,\n",
    "    except for line breaks.\n",
    "    \"\"\"\n",
    "    text = ' '.join(words)\n",
    "    step1 = text.replace(\"`` \", '\"').replace(\" ''\", '\"').replace('. . .', '...')\n",
    "    step2 = step1.replace(\" ( \", \" (\").replace(\" ) \", \") \")\n",
    "    step3 = re.sub(r' ([.,:;?!%]+)([ \\'\"`])', r\"\\1\\2\", step2)\n",
    "    step4 = re.sub(r' ([.,:;?!%]+)$', r\"\\1\", step3)\n",
    "    step5 = step4.replace(\" '\", \"'\").replace(\" n't\", \"n't\").replace(\n",
    "        \"can not\", \"cannot\")\n",
    "    step6 = step5.replace(\" ` \", \" '\")\n",
    "    return step6.strip()\n",
    "\n",
    "\n",
    "# https://stackoverflow.com/a/47091490\n",
    "def decontracted(phrase):\n",
    "    \"\"\"Convert contractions like \"can't\" into \"can not\"\n",
    "    \"\"\"\n",
    "    # specific\n",
    "    phrase = re.sub(r\"won\\'t\", \"will not\", phrase)\n",
    "    phrase = re.sub(r\"can\\'t\", \"can not\", phrase)\n",
    "\n",
    "    # general\n",
    "    #phrase = re.sub(r\"n't\", \" not\", phrase) # resulted in \"ca not\" when sentence started with \"can't\"\n",
    "    phrase = re.sub(r\"\\'re\", \" are\", phrase)\n",
    "    phrase = re.sub(r\"\\'s\", \" is\", phrase)\n",
    "    phrase = re.sub(r\"\\'d\", \" would\", phrase)\n",
    "    phrase = re.sub(r\"\\'ll\", \" will\", phrase)\n",
    "    phrase = re.sub(r\"\\'t\", \" not\", phrase)\n",
    "    phrase = re.sub(r\"\\'ve\", \" have\", phrase)\n",
    "    phrase = re.sub(r\"\\'m\", \" am\", phrase)\n",
    "    return phrase\n",
    "\n",
    "\n",
    "# https://github.com/rishabhverma17/sms_slang_translator/blob/master/slang.txt\n",
    "slang_abbrev_dict = {\n",
    "    'AFAIK': 'As Far As I Know',\n",
    "    'AFK': 'Away From Keyboard',\n",
    "    'ASAP': 'As Soon As Possible',\n",
    "    'ATK': 'At The Keyboard',\n",
    "    'ATM': 'At The Moment',\n",
    "    'A3': 'Anytime, Anywhere, Anyplace',\n",
    "    'BAK': 'Back At Keyboard',\n",
    "    'BBL': 'Be Back Later',\n",
    "    'BBS': 'Be Back Soon',\n",
    "    'BFN': 'Bye For Now',\n",
    "    'B4N': 'Bye For Now',\n",
    "    'BRB': 'Be Right Back',\n",
    "    'BRT': 'Be Right There',\n",
    "    'BTW': 'By The Way',\n",
    "    'B4': 'Before',\n",
    "    'CU': 'See You',\n",
    "    'CUL8R': 'See You Later',\n",
    "    'CYA': 'See You',\n",
    "    'FAQ': 'Frequently Asked Questions',\n",
    "    'FC': 'Fingers Crossed',\n",
    "    'FWIW': 'For What It\\'s Worth',\n",
    "    'FYI': 'For Your Information',\n",
    "    'GAL': 'Get A Life',\n",
    "    'GG': 'Good Game',\n",
    "    'GN': 'Good Night',\n",
    "    'GMTA': 'Great Minds Think Alike',\n",
    "    'GR8': 'Great!',\n",
    "    'G9': 'Genius',\n",
    "    'IC': 'I See',\n",
    "    'ICQ': 'I Seek you',\n",
    "    'ILU': 'I Love You',\n",
    "    'IMHO': 'In My Humble Opinion',\n",
    "    'IMO': 'In My Opinion',\n",
    "    'IOW': 'In Other Words',\n",
    "    'IRL': 'In Real Life',\n",
    "    'KISS': 'Keep It Simple, Stupid',\n",
    "    'LDR': 'Long Distance Relationship',\n",
    "    'LMAO': 'Laugh My Ass Off',\n",
    "    'LOL': 'Laughing Out Loud',\n",
    "    'LTNS': 'Long Time No See',\n",
    "    'L8R': 'Later',\n",
    "    'MTE': 'My Thoughts Exactly',\n",
    "    'M8': 'Mate',\n",
    "    'NRN': 'No Reply Necessary',\n",
    "    'OIC': 'Oh I See',\n",
    "    'OMG': 'Oh My God',\n",
    "    'PITA': 'Pain In The Ass',\n",
    "    'PRT': 'Party',\n",
    "    'PRW': 'Parents Are Watching',\n",
    "    'QPSA?': 'Que Pasa?',\n",
    "    'ROFL': 'Rolling On The Floor Laughing',\n",
    "    'ROFLOL': 'Rolling On The Floor Laughing Out Loud',\n",
    "    'ROTFLMAO': 'Rolling On The Floor Laughing My Ass Off',\n",
    "    'SK8': 'Skate',\n",
    "    'STATS': 'Your sex and age',\n",
    "    'ASL': 'Age, Sex, Location',\n",
    "    'THX': 'Thank You',\n",
    "    'TTFN': 'Ta-Ta For Now!',\n",
    "    'TTYL': 'Talk To You Later',\n",
    "    'U': 'You',\n",
    "    'U2': 'You Too',\n",
    "    'U4E': 'Yours For Ever',\n",
    "    'WB': 'Welcome Back',\n",
    "    'WTF': 'What The Fuck',\n",
    "    'WTG': 'Way To Go!',\n",
    "    'WUF': 'Where Are You From?',\n",
    "    'W8': 'Wait',\n",
    "    '7K': 'Sick:-D Laugher'\n",
    "}\n",
    "\n",
    "\n",
    "def unslang(text):\n",
    "    \"\"\"Converts text like \"OMG\" into \"Oh my God\"\n",
    "    \"\"\"\n",
    "    if text.upper() in slang_abbrev_dict.keys():\n",
    "        return slang_abbrev_dict[text.upper()]\n",
    "    else:\n",
    "        return text\n",
    "\n",
    "\n",
    "# https://gist.github.com/sebleier/554280\n",
    "stopwords = [\n",
    "    \"a\", \"about\", \"above\", \"after\", \"again\", \"against\", \"ain\", \"all\", \"am\",\n",
    "    \"an\", \"and\", \"any\", \"are\", \"aren\", \"aren't\", \"as\", \"at\", \"be\", \"because\",\n",
    "    \"been\", \"before\", \"being\", \"below\", \"between\", \"both\", \"but\", \"by\", \"can\",\n",
    "    \"couldn\", \"couldn't\", \"d\", \"did\", \"didn\", \"didn't\", \"do\", \"does\", \"doesn\",\n",
    "    \"doesn't\", \"doing\", \"don\", \"don't\", \"down\", \"during\", \"each\", \"few\", \"for\",\n",
    "    \"from\", \"further\", \"had\", \"hadn\", \"hadn't\", \"has\", \"hasn\", \"hasn't\", \"have\",\n",
    "    \"haven\", \"haven't\", \"having\", \"he\", \"her\", \"here\", \"hers\", \"herself\", \"him\",\n",
    "    \"himself\", \"his\", \"how\", \"i\", \"if\", \"in\", \"into\", \"is\", \"isn\", \"isn't\",\n",
    "    \"it\", \"it's\", \"its\", \"itself\", \"just\", \"ll\", \"m\", \"ma\", \"me\", \"mightn\",\n",
    "    \"mightn't\", \"more\", \"most\", \"mustn\", \"mustn't\", \"my\", \"myself\", \"needn\",\n",
    "    \"needn't\", \"no\", \"nor\", \"not\", \"now\", \"o\", \"of\", \"off\", \"on\", \"once\",\n",
    "    \"only\", \"or\", \"other\", \"our\", \"ours\", \"ourselves\", \"out\", \"over\", \"own\",\n",
    "    \"re\", \"s\", \"same\", \"shan\", \"shan't\", \"she\", \"she's\", \"should\", \"should've\",\n",
    "    \"shouldn\", \"shouldn't\", \"so\", \"some\", \"such\", \"t\", \"than\", \"that\",\n",
    "    \"that'll\", \"the\", \"their\", \"theirs\", \"them\", \"themselves\", \"then\", \"there\",\n",
    "    \"these\", \"they\", \"this\", \"those\", \"through\", \"to\", \"too\", \"under\", \"until\",\n",
    "    \"up\", \"ve\", \"very\", \"was\", \"wasn\", \"wasn't\", \"we\", \"were\", \"weren\",\n",
    "    \"weren't\", \"what\", \"when\", \"where\", \"which\", \"while\", \"who\", \"whom\", \"why\",\n",
    "    \"will\", \"with\", \"won\", \"won't\", \"wouldn\", \"wouldn't\", \"y\", \"you\", \"you'd\",\n",
    "    \"you'll\", \"you're\", \"you've\", \"your\", \"yours\", \"yourself\", \"yourselves\",\n",
    "    \"could\", \"he'd\", \"he'll\", \"he's\", \"here's\", \"how's\", \"i'd\", \"i'll\", \"i'm\",\n",
    "    \"i've\", \"let's\", \"ought\", \"she'd\", \"she'll\", \"that's\", \"there's\", \"they'd\",\n",
    "    \"they'll\", \"they're\", \"they've\", \"we'd\", \"we'll\", \"we're\", \"we've\",\n",
    "    \"what's\", \"when's\", \"where's\", \"who's\", \"why's\", \"would\", \"able\", \"abst\",\n",
    "    \"accordance\", \"according\", \"accordingly\", \"across\", \"act\", \"actually\",\n",
    "    \"added\", \"adj\", \"affected\", \"affecting\", \"affects\", \"afterwards\", \"ah\",\n",
    "    \"almost\", \"alone\", \"along\", \"already\", \"also\", \"although\", \"always\",\n",
    "    \"among\", \"amongst\", \"announce\", \"another\", \"anybody\", \"anyhow\", \"anymore\",\n",
    "    \"anyone\", \"anything\", \"anyway\", \"anyways\", \"anywhere\", \"apparently\",\n",
    "    \"approximately\", \"arent\", \"arise\", \"around\", \"aside\", \"ask\", \"asking\",\n",
    "    \"auth\", \"available\", \"away\", \"awfully\", \"b\", \"back\", \"became\", \"become\",\n",
    "    \"becomes\", \"becoming\", \"beforehand\", \"begin\", \"beginning\", \"beginnings\",\n",
    "    \"begins\", \"behind\", \"believe\", \"beside\", \"besides\", \"beyond\", \"biol\",\n",
    "    \"brief\", \"briefly\", \"c\", \"ca\", \"came\", \"cannot\", \"can't\", \"cause\", \"causes\",\n",
    "    \"certain\", \"certainly\", \"co\", \"com\", \"come\", \"comes\", \"contain\",\n",
    "    \"containing\", \"contains\", \"couldnt\", \"date\", \"different\", \"done\",\n",
    "    \"downwards\", \"due\", \"e\", \"ed\", \"edu\", \"effect\", \"eg\", \"eight\", \"eighty\",\n",
    "    \"either\", \"else\", \"elsewhere\", \"end\", \"ending\", \"enough\", \"especially\",\n",
    "    \"et\", \"etc\", \"even\", \"ever\", \"every\", \"everybody\", \"everyone\", \"everything\",\n",
    "    \"everywhere\", \"ex\", \"except\", \"f\", \"far\", \"ff\", \"fifth\", \"first\", \"five\",\n",
    "    \"fix\", \"followed\", \"following\", \"follows\", \"former\", \"formerly\", \"forth\",\n",
    "    \"found\", \"four\", \"furthermore\", \"g\", \"gave\", \"get\", \"gets\", \"getting\",\n",
    "    \"give\", \"given\", \"gives\", \"giving\", \"go\", \"goes\", \"gone\", \"got\", \"gotten\",\n",
    "    \"h\", \"happens\", \"hardly\", \"hed\", \"hence\", \"hereafter\", \"hereby\", \"herein\",\n",
    "    \"heres\", \"hereupon\", \"hes\", \"hi\", \"hid\", \"hither\", \"home\", \"howbeit\",\n",
    "    \"however\", \"hundred\", \"id\", \"ie\", \"im\", \"immediate\", \"immediately\",\n",
    "    \"importance\", \"important\", \"inc\", \"indeed\", \"index\", \"information\",\n",
    "    \"instead\", \"invention\", \"inward\", \"itd\", \"it'll\", \"j\", \"k\", \"keep\", \"keeps\",\n",
    "    \"kept\", \"kg\", \"km\", \"know\", \"known\", \"knows\", \"l\", \"largely\", \"last\",\n",
    "    \"lately\", \"later\", \"latter\", \"latterly\", \"least\", \"less\", \"lest\", \"let\",\n",
    "    \"lets\", \"like\", \"liked\", \"likely\", \"line\", \"little\", \"'ll\", \"look\",\n",
    "    \"looking\", \"looks\", \"ltd\", \"made\", \"mainly\", \"make\", \"makes\", \"many\", \"may\",\n",
    "    \"maybe\", \"mean\", \"means\", \"meantime\", \"meanwhile\", \"merely\", \"mg\", \"might\",\n",
    "    \"million\", \"miss\", \"ml\", \"moreover\", \"mostly\", \"mr\", \"mrs\", \"much\", \"mug\",\n",
    "    \"must\", \"n\", \"na\", \"name\", \"namely\", \"nay\", \"nd\", \"near\", \"nearly\",\n",
    "    \"necessarily\", \"necessary\", \"need\", \"needs\", \"neither\", \"never\",\n",
    "    \"nevertheless\", \"new\", \"next\", \"nine\", \"ninety\", \"nobody\", \"non\", \"none\",\n",
    "    \"nonetheless\", \"noone\", \"normally\", \"nos\", \"noted\", \"nothing\", \"nowhere\",\n",
    "    \"obtain\", \"obtained\", \"obviously\", \"often\", \"oh\", \"ok\", \"okay\", \"old\",\n",
    "    \"omitted\", \"one\", \"ones\", \"onto\", \"ord\", \"others\", \"otherwise\", \"outside\",\n",
    "    \"overall\", \"owing\", \"p\", \"page\", \"pages\", \"part\", \"particular\",\n",
    "    \"particularly\", \"past\", \"per\", \"perhaps\", \"placed\", \"please\", \"plus\",\n",
    "    \"poorly\", \"possible\", \"possibly\", \"potentially\", \"pp\", \"predominantly\",\n",
    "    \"present\", \"previously\", \"primarily\", \"probably\", \"promptly\", \"proud\",\n",
    "    \"provides\", \"put\", \"q\", \"que\", \"quickly\", \"quite\", \"qv\", \"r\", \"ran\",\n",
    "    \"rather\", \"rd\", \"readily\", \"really\", \"recent\", \"recently\", \"ref\", \"refs\",\n",
    "    \"regarding\", \"regardless\", \"regards\", \"related\", \"relatively\", \"research\",\n",
    "    \"respectively\", \"resulted\", \"resulting\", \"results\", \"right\", \"run\", \"said\",\n",
    "    \"saw\", \"say\", \"saying\", \"says\", \"sec\", \"section\", \"see\", \"seeing\", \"seem\",\n",
    "    \"seemed\", \"seeming\", \"seems\", \"seen\", \"self\", \"selves\", \"sent\", \"seven\",\n",
    "    \"several\", \"shall\", \"shed\", \"shes\", \"show\", \"showed\", \"shown\", \"showns\",\n",
    "    \"shows\", \"significant\", \"significantly\", \"similar\", \"similarly\", \"since\",\n",
    "    \"six\", \"slightly\", \"somebody\", \"somehow\", \"someone\", \"somethan\",\n",
    "    \"something\", \"sometime\", \"sometimes\", \"somewhat\", \"somewhere\", \"soon\",\n",
    "    \"sorry\", \"specifically\", \"specified\", \"specify\", \"specifying\", \"still\",\n",
    "    \"stop\", \"strongly\", \"sub\", \"substantially\", \"successfully\", \"sufficiently\",\n",
    "    \"suggest\", \"sup\", \"sure\", \"take\", \"taken\", \"taking\", \"tell\", \"tends\", \"th\",\n",
    "    \"thank\", \"thanks\", \"thanx\", \"thats\", \"that've\", \"thence\", \"thereafter\",\n",
    "    \"thereby\", \"thered\", \"therefore\", \"therein\", \"there'll\", \"thereof\",\n",
    "    \"therere\", \"theres\", \"thereto\", \"thereupon\", \"there've\", \"theyd\", \"theyre\",\n",
    "    \"think\", \"thou\", \"though\", \"thoughh\", \"thousand\", \"throug\", \"throughout\",\n",
    "    \"thru\", \"thus\", \"til\", \"tip\", \"together\", \"took\", \"toward\", \"towards\",\n",
    "    \"tried\", \"tries\", \"truly\", \"try\", \"trying\", \"ts\", \"twice\", \"two\", \"u\", \"un\",\n",
    "    \"unfortunately\", \"unless\", \"unlike\", \"unlikely\", \"unto\", \"upon\", \"ups\",\n",
    "    \"us\", \"use\", \"used\", \"useful\", \"usefully\", \"usefulness\", \"uses\", \"using\",\n",
    "    \"usually\", \"v\", \"value\", \"various\", \"'ve\", \"via\", \"viz\", \"vol\", \"vols\",\n",
    "    \"vs\", \"w\", \"want\", \"wants\", \"wasnt\", \"way\", \"wed\", \"welcome\", \"went\",\n",
    "    \"werent\", \"whatever\", \"what'll\", \"whats\", \"whence\", \"whenever\",\n",
    "    \"whereafter\", \"whereas\", \"whereby\", \"wherein\", \"wheres\", \"whereupon\",\n",
    "    \"wherever\", \"whether\", \"whim\", \"whither\", \"whod\", \"whoever\", \"whole\",\n",
    "    \"who'll\", \"whomever\", \"whos\", \"whose\", \"widely\", \"willing\", \"wish\",\n",
    "    \"within\", \"without\", \"wont\", \"words\", \"world\", \"wouldnt\", \"www\", \"x\", \"yes\",\n",
    "    \"yet\", \"youd\", \"youre\", \"z\", \"zero\", \"a's\", \"ain't\", \"allow\", \"allows\",\n",
    "    \"apart\", \"appear\", \"appreciate\", \"appropriate\", \"associated\", \"best\",\n",
    "    \"better\", \"c'mon\", \"c's\", \"cant\", \"changes\", \"clearly\", \"concerning\",\n",
    "    \"consequently\", \"consider\", \"considering\", \"corresponding\", \"course\",\n",
    "    \"currently\", \"definitely\", \"described\", \"despite\", \"entirely\", \"exactly\",\n",
    "    \"example\", \"going\", \"greetings\", \"hello\", \"help\", \"hopefully\", \"ignored\",\n",
    "    \"inasmuch\", \"indicate\", \"indicated\", \"indicates\", \"inner\", \"insofar\",\n",
    "    \"it'd\", \"keep\", \"keeps\", \"novel\", \"presumably\", \"reasonably\", \"second\",\n",
    "    \"secondly\", \"sensible\", \"serious\", \"seriously\", \"sure\", \"t's\", \"third\",\n",
    "    \"thorough\", \"thoroughly\", \"three\", \"well\", \"wonder\", \"a\", \"about\", \"above\",\n",
    "    \"above\", \"across\", \"after\", \"afterwards\", \"again\", \"against\", \"all\",\n",
    "    \"almost\", \"alone\", \"along\", \"already\", \"also\", \"although\", \"always\", \"am\",\n",
    "    \"among\", \"amongst\", \"amoungst\", \"amount\", \"an\", \"and\", \"another\", \"any\",\n",
    "    \"anyhow\", \"anyone\", \"anything\", \"anyway\", \"anywhere\", \"are\", \"around\", \"as\",\n",
    "    \"at\", \"back\", \"be\", \"became\", \"because\", \"become\", \"becomes\", \"becoming\",\n",
    "    \"been\", \"before\", \"beforehand\", \"behind\", \"being\", \"below\", \"beside\",\n",
    "    \"besides\", \"between\", \"beyond\", \"bill\", \"both\", \"bottom\", \"but\", \"by\",\n",
    "    \"call\", \"can\", \"cannot\", \"cant\", \"co\", \"con\", \"could\", \"couldnt\", \"cry\",\n",
    "    \"de\", \"describe\", \"detail\", \"do\", \"done\", \"down\", \"due\", \"during\", \"each\",\n",
    "    \"eg\", \"eight\", \"either\", \"eleven\", \"else\", \"elsewhere\", \"empty\", \"enough\",\n",
    "    \"etc\", \"even\", \"ever\", \"every\", \"everyone\", \"everything\", \"everywhere\",\n",
    "    \"except\", \"few\", \"fifteen\", \"fify\", \"fill\", \"find\", \"first\", \"five\",\n",
    "    \"for\", \"former\", \"formerly\", \"forty\", \"found\", \"four\", \"from\", \"front\",\n",
    "    \"full\", \"further\", \"get\", \"give\", \"go\", \"had\", \"has\", \"hasnt\", \"have\", \"he\",\n",
    "    \"hence\", \"her\", \"here\", \"hereafter\", \"hereby\", \"herein\", \"hereupon\", \"hers\",\n",
    "    \"herself\", \"him\", \"himself\", \"his\", \"how\", \"however\", \"hundred\", \"ie\", \"if\",\n",
    "    \"in\", \"inc\", \"indeed\", \"interest\", \"into\", \"is\", \"it\", \"its\", \"itself\",\n",
    "    \"keep\", \"last\", \"latter\", \"latterly\", \"least\", \"less\", \"ltd\", \"made\",\n",
    "    \"many\", \"may\", \"me\", \"meanwhile\", \"might\", \"mill\", \"mine\", \"more\",\n",
    "    \"moreover\", \"most\", \"mostly\", \"move\", \"much\", \"must\", \"my\", \"myself\",\n",
    "    \"name\", \"namely\", \"neither\", \"never\", \"nevertheless\", \"next\", \"nine\", \"no\",\n",
    "    \"nobody\", \"none\", \"noone\", \"nor\", \"not\", \"nothing\", \"now\", \"nowhere\", \"of\",\n",
    "    \"off\", \"often\", \"on\", \"once\", \"one\", \"only\", \"onto\", \"or\", \"other\",\n",
    "    \"others\", \"otherwise\", \"our\", \"ours\", \"ourselves\", \"out\", \"over\", \"own\",\n",
    "    \"part\", \"per\", \"perhaps\", \"please\", \"put\", \"rather\", \"re\", \"same\", \"see\",\n",
    "    \"seem\", \"seemed\", \"seeming\", \"seems\", \"serious\", \"several\", \"she\", \"should\",\n",
    "    \"show\", \"side\", \"since\", \"sincere\", \"six\", \"sixty\", \"so\", \"some\", \"somehow\",\n",
    "    \"someone\", \"something\", \"sometime\", \"sometimes\", \"somewhere\", \"still\",\n",
    "    \"such\", \"system\", \"take\", \"ten\", \"than\", \"that\", \"the\", \"their\", \"them\",\n",
    "    \"themselves\", \"then\", \"thence\", \"there\", \"thereafter\", \"thereby\",\n",
    "    \"therefore\", \"therein\", \"thereupon\", \"these\", \"they\", \"thickv\", \"thin\",\n",
    "    \"third\", \"this\", \"those\", \"though\", \"three\", \"through\", \"throughout\",\n",
    "    \"thru\", \"thus\", \"to\", \"together\", \"too\", \"top\", \"toward\", \"towards\",\n",
    "    \"twelve\", \"twenty\", \"two\", \"un\", \"under\", \"until\", \"up\", \"upon\", \"us\",\n",
    "    \"very\", \"via\", \"was\", \"we\", \"well\", \"were\", \"what\", \"whatever\", \"when\",\n",
    "    \"whence\", \"whenever\", \"where\", \"whereafter\", \"whereas\", \"whereby\",\n",
    "    \"wherein\", \"whereupon\", \"wherever\", \"whether\", \"which\", \"while\", \"whither\",\n",
    "    \"who\", \"whoever\", \"whole\", \"whom\", \"whose\", \"why\", \"will\", \"with\", \"within\",\n",
    "    \"without\", \"would\", \"yet\", \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\",\n",
    "    \"the\", \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\",\n",
    "    \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\",\n",
    "    \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\",\n",
    "    \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"co\", \"op\", \"research-articl\",\n",
    "    \"pagecount\", \"cit\", \"ibid\", \"les\", \"le\", \"au\", \"que\", \"est\", \"pas\", \"vol\",\n",
    "    \"el\", \"los\", \"pp\", \"u201d\", \"well-b\", \"http\", \"volumtype\", \"par\", \"0o\",\n",
    "    \"0s\", \"3a\", \"3b\", \"3d\", \"6b\", \"6o\", \"a1\", \"a2\", \"a3\", \"a4\", \"ab\", \"ac\",\n",
    "    \"ad\", \"ae\", \"af\", \"ag\", \"aj\", \"al\", \"an\", \"ao\", \"ap\", \"ar\", \"av\", \"aw\",\n",
    "    \"ax\", \"ay\", \"az\", \"b1\", \"b2\", \"b3\", \"ba\", \"bc\", \"bd\", \"be\", \"bi\", \"bj\",\n",
    "    \"bk\", \"bl\", \"bn\", \"bp\", \"br\", \"bs\", \"bt\", \"bu\", \"bx\", \"c1\", \"c2\", \"c3\",\n",
    "    \"cc\", \"cd\", \"ce\", \"cf\", \"cg\", \"ch\", \"ci\", \"cj\", \"cl\", \"cm\", \"cn\", \"cp\",\n",
    "    \"cq\", \"cr\", \"cs\", \"ct\", \"cu\", \"cv\", \"cx\", \"cy\", \"cz\", \"d2\", \"da\", \"dc\",\n",
    "    \"dd\", \"de\", \"df\", \"di\", \"dj\", \"dk\", \"dl\", \"do\", \"dp\", \"dr\", \"ds\", \"dt\",\n",
    "    \"du\", \"dx\", \"dy\", \"e2\", \"e3\", \"ea\", \"ec\", \"ed\", \"ee\", \"ef\", \"ei\", \"ej\",\n",
    "    \"el\", \"em\", \"en\", \"eo\", \"ep\", \"eq\", \"er\", \"es\", \"et\", \"eu\", \"ev\", \"ex\",\n",
    "    \"ey\", \"f2\", \"fa\", \"fc\", \"ff\", \"fi\", \"fj\", \"fl\", \"fn\", \"fo\", \"fr\", \"fs\",\n",
    "    \"ft\", \"fu\", \"fy\", \"ga\", \"ge\", \"gi\", \"gj\", \"gl\", \"go\", \"gr\", \"gs\", \"gy\",\n",
    "    \"h2\", \"h3\", \"hh\", \"hi\", \"hj\", \"ho\", \"hr\", \"hs\", \"hu\", \"hy\", \"i\", \"i2\", \"i3\",\n",
    "    \"i4\", \"i6\", \"i7\", \"i8\", \"ia\", \"ib\", \"ic\", \"ie\", \"ig\", \"ih\", \"ii\", \"ij\",\n",
    "    \"il\", \"in\", \"io\", \"ip\", \"iq\", \"ir\", \"iv\", \"ix\", \"iy\", \"iz\", \"jj\", \"jr\",\n",
    "    \"js\", \"jt\", \"ju\", \"ke\", \"kg\", \"kj\", \"km\", \"ko\", \"l2\", \"la\", \"lb\", \"lc\",\n",
    "    \"lf\", \"lj\", \"ln\", \"lo\", \"lr\", \"ls\", \"lt\", \"m2\", \"ml\", \"mn\", \"mo\", \"ms\",\n",
    "    \"mt\", \"mu\", \"n2\", \"nc\", \"nd\", \"ne\", \"ng\", \"ni\", \"nj\", \"nl\", \"nn\", \"nr\",\n",
    "    \"ns\", \"nt\", \"ny\", \"oa\", \"ob\", \"oc\", \"od\", \"of\", \"og\", \"oi\", \"oj\", \"ol\",\n",
    "    \"om\", \"on\", \"oo\", \"oq\", \"or\", \"os\", \"ot\", \"ou\", \"ow\", \"ox\", \"oz\", \"p1\",\n",
    "    \"p2\", \"p3\", \"pc\", \"pd\", \"pe\", \"pf\", \"ph\", \"pi\", \"pj\", \"pk\", \"pl\", \"pm\",\n",
    "    \"pn\", \"po\", \"pq\", \"pr\", \"ps\", \"pt\", \"pu\", \"py\", \"qj\", \"qu\", \"r2\", \"ra\",\n",
    "    \"rc\", \"rd\", \"rf\", \"rh\", \"ri\", \"rj\", \"rl\", \"rm\", \"rn\", \"ro\", \"rq\", \"rr\",\n",
    "    \"rs\", \"rt\", \"ru\", \"rv\", \"ry\", \"s2\", \"sa\", \"sc\", \"sd\", \"se\", \"sf\", \"si\",\n",
    "    \"sj\", \"sl\", \"sm\", \"sn\", \"sp\", \"sq\", \"sr\", \"ss\", \"st\", \"sy\", \"sz\", \"t1\",\n",
    "    \"t2\", \"t3\", \"tb\", \"tc\", \"td\", \"te\", \"tf\", \"th\", \"ti\", \"tj\", \"tl\", \"tm\",\n",
    "    \"tn\", \"tp\", \"tq\", \"tr\", \"ts\", \"tt\", \"tv\", \"tx\", \"ue\", \"ui\", \"uj\", \"uk\",\n",
    "    \"um\", \"un\", \"uo\", \"ur\", \"ut\", \"va\", \"wa\", \"vd\", \"wi\", \"vj\", \"vo\", \"wo\",\n",
    "    \"vq\", \"vt\", \"vu\", \"x1\", \"x2\", \"x3\", \"xf\", \"xi\", \"xj\", \"xk\", \"xl\", \"xn\",\n",
    "    \"xo\", \"xs\", \"xt\", \"xv\", \"xx\", \"y2\", \"yj\", \"yl\", \"yr\", \"ys\", \"yt\", \"zi\", \"zz\"\n",
    "]\n",
    "\n",
    "\n",
    "# Reference : https://gist.github.com/slowkow/7a7f61f495e3dbb7e3d767f97bd7304b\n",
    "def remove_emoji(text):\n",
    "    emoji_pattern = re.compile(\n",
    "        \"[\"\n",
    "        u\"\\U0001F600-\\U0001F64F\"  # emoticons\n",
    "        u\"\\U0001F300-\\U0001F5FF\"  # symbols & pictographs\n",
    "        u\"\\U0001F680-\\U0001F6FF\"  # transport & map symbols\n",
    "        u\"\\U0001F1E0-\\U0001F1FF\"  # flags (iOS)\n",
    "        u\"\\U00002702-\\U000027B0\"\n",
    "        u\"\\U000024C2-\\U0001F251\"\n",
    "        \"]+\",\n",
    "        flags=re.UNICODE)\n",
    "    return emoji_pattern.sub(r'', text)\n",
    "\n",
    "\n",
    "# from: https://www.kaggle.com/shahules/basic-eda-cleaning-and-glove\n",
    "# maybe a bug, it removes question marks?\n",
    "spell = SpellChecker()\n",
    "\n",
    "def correct_spellings(text):\n",
    "    \"\"\"Replace words the SpellChecker flags as unknown with its best correction.\"\"\"\n",
    "    words = text.split()\n",
    "    misspelled_words = spell.unknown(words)\n",
    "    corrected_text = [\n",
    "        spell.correction(word) if word in misspelled_words else word\n",
    "        for word in words\n",
    "    ]\n",
    "    return \" \".join(corrected_text)\n",
    "\n",
    "def remove_urls(text):\n",
    "    \"\"\"Remove http(s)://, www and pic.twitter.com URLs from ``text``.\n",
    "\n",
    "    Uses the module-level ``clean`` helper, which also collapses the\n",
    "    whitespace left behind by each removal.\n",
    "    \"\"\"\n",
    "    text = clean(r\"http\\S+\", text)\n",
    "    text = clean(r\"www\\S+\", text)\n",
    "    # bug fix: escape the dots so they match a literal '.' rather than any char\n",
    "    text = clean(r\"pic\\.twitter\\.com\\S+\", text)\n",
    "\n",
    "    return text\n",
    "\n",
    "def clean(reg_exp, text):\n",
    "    text = re.sub(reg_exp, \" \", text)\n",
    "\n",
    "    # replace multiple spaces with one.\n",
    "    text = re.sub('\\s{2,}', ' ', text)\n",
    "\n",
    "    return text\n",
    "\n",
    "lemmatizer = WordNetLemmatizer()\n",
    "\n",
    "def clean_all(t, correct_spelling=False, remove_stopwords=False, lemmatize=False):\n",
    "    \"\"\"Run the full text-cleaning pipeline on one string.\n",
    "\n",
    "    :param t: raw text to clean\n",
    "    :param correct_spelling: if True, run the (slow) spell corrector at the end\n",
    "    :param remove_stopwords: if True, drop tokens found in ``stopwords``\n",
    "    :param lemmatize: if True, lemmatize each token with WordNet\n",
    "    :return: the cleaned text as a single string\n",
    "\n",
    "    NOTE(review): depends on ``decontracted``, ``unslang``, ``untokenize``\n",
    "    and ``stopwords`` defined elsewhere in the notebook -- verify they are\n",
    "    executed before this cell.\n",
    "    \"\"\"\n",
    "    \n",
    "    # first do bulk cleanup on tokens that don't depend on word tokenization\n",
    "\n",
    "    # remove xml tags\n",
    "    t = clean(r\"<[^>]+>\", t)\n",
    "    t = clean(r\"&lt;\", t)\n",
    "    t = clean(r\"&gt;\", t)\n",
    "\n",
    "    # remove URLs\n",
    "    t = remove_urls(t)\n",
    "\n",
    "    # https://stackoverflow.com/a/35041925\n",
    "    # replace multiple punctuation with single. Ex: !?!?!? would become ?\n",
    "    t = clean(r'[\\?\\.\\!]+(?=[\\?\\.\\!])', t)\n",
    "\n",
    "    t = remove_emoji(t)\n",
    "\n",
    "    # expand common contractions like \"I'm\" \"he'll\"\n",
    "    t = decontracted(t)\n",
    "\n",
    "    # now remove/expand bad patterns per word\n",
    "    words = word_tokenize(t)\n",
    "\n",
    "    # remove stopwords\n",
    "    if remove_stopwords is True:\n",
    "        words = [w for w in words if not w in stopwords]\n",
    "\n",
    "    clean_words = []\n",
    "\n",
    "    for w in words:\n",
    "        # normalize punctuation\n",
    "        w = re.sub(r'&', 'and', w)\n",
    "\n",
    "        # expand slang like OMG = Oh my God\n",
    "        w = unslang(w)\n",
    "\n",
    "        if lemmatize is True:\n",
    "            w = lemmatizer.lemmatize(w)\n",
    "        \n",
    "        clean_words.append(w)\n",
    "\n",
    "    # join the words back into a full string\n",
    "    t = untokenize(clean_words)\n",
    "\n",
    "    if correct_spelling is True:\n",
    "        # this resulted in lots of lost punctuation - omitting for now. Also greatly speeds things up\n",
    "        t = correct_spellings(t)\n",
    "\n",
    "    # finally, remove any non ascii and special characters that made it through\n",
    "    t = clean(r\"[^A-Za-z0-9\\.\\'!\\?,\\$]\", t)\n",
    "    return t\n",
    "\n",
    "\n",
    "def clean_dataframe(df, correct_spelling=False, remove_stopwords=False):\n",
    "    \"\"\"Add a 'clean' column containing clean_all() applied to each row's 'text'.\"\"\"\n",
    "    def _clean_row(row):\n",
    "        return clean_all(\n",
    "            row['text'],\n",
    "            correct_spelling=correct_spelling,\n",
    "            remove_stopwords=remove_stopwords,\n",
    "        )\n",
    "\n",
    "    df['clean'] = df.apply(_clean_row, axis=1)\n",
    "    return df\n",
    "\n",
    "\n",
    "# https://towardsdatascience.com/make-your-own-super-pandas-using-multiproc-1c04f41944a1\n",
    "def parallelize_dataframe(\n",
    "        df, func, n_cores=2):  # I think Kaggle notebooks only have 2 cores?\n",
    "    \"\"\"Split ``df`` into ``n_cores`` chunks, apply ``func`` in parallel, re-concat.\n",
    "\n",
    "    :param df: DataFrame to process\n",
    "    :param func: picklable function mapping a DataFrame chunk to a DataFrame\n",
    "    :param n_cores: number of worker processes (and chunks)\n",
    "    :return: the concatenated result of ``func`` over all chunks\n",
    "    \"\"\"\n",
    "    df_split = np.array_split(df, n_cores)\n",
    "    pool = Pool(n_cores)\n",
    "    try:\n",
    "        df = pd.concat(pool.map(func, df_split))\n",
    "    finally:\n",
    "        # bug fix: always release the worker processes, even if func raises\n",
    "        pool.close()\n",
    "        pool.join()\n",
    "\n",
    "    return df\n",
    "\n",
    "# encode input texts into BERT token / segment id matrices\n",
    "def get_encode(data, token_dict, max_len=100):\n",
    "    '''\n",
    "    :param data: list of strings [text1, text2, text3, ...]\n",
    "    :param token_dict: token-to-id dictionary (see get_token_dict)\n",
    "    :param max_len: maximum sequence length used for padding / truncating\n",
    "    :return: [X1, X2] where X1 holds the encoded token ids and X2 the\n",
    "             segment ids (first / second sentence positions)\n",
    "    '''\n",
    "\n",
    "    tokenizer = Tokenizer(token_dict)\n",
    "    X1 = []\n",
    "    X2 = []\n",
    "    for line in data:\n",
    "        # single-sentence encoding (no second segment)\n",
    "        x1, x2 = tokenizer.encode(first=line)\n",
    "        X1.append(x1)\n",
    "        X2.append(x2)\n",
    "    # pad / truncate to a fixed length with the Keras helper,\n",
    "    # same as a word2vec pipeline would.\n",
    "    # bug fix: honour the max_len parameter instead of the global `maxlen`\n",
    "    X1 = sequence.pad_sequences(X1, maxlen=max_len, padding='post', truncating='post')\n",
    "    X2 = sequence.pad_sequences(X2, maxlen=max_len, padding='post', truncating='post')\n",
    "    return [X1, X2]\n",
    "\n",
    "def get_data(is_shuffle=False):\n",
    "    '''\n",
    "    Load the train/test CSVs, clean the text and return numpy arrays.\n",
    "    :param is_shuffle: if True, shuffle training features/labels with the same permutation\n",
    "    :return: x_train, y_train, x_test, id_test\n",
    "    '''\n",
    "    train_df = pd.read_csv(\"./data/train.csv\")\n",
    "    test_df = pd.read_csv(\"./data/test.csv\")\n",
    "\n",
    "    y_train = train_df[['target']]\n",
    "    # bug fix: .copy() so clean_dataframe writes to an independent frame\n",
    "    # rather than a view of train_df/test_df (removes SettingWithCopyWarning)\n",
    "    raw_x_train = train_df[['text']].copy()\n",
    "    x_train = []\n",
    "\n",
    "    raw_x_test = test_df[['text']].copy()\n",
    "    x_test = []\n",
    "    id_test = test_df[['id']].values\n",
    "\n",
    "    # \"super\"-version data cleaning\n",
    "    raw_x_test = clean_dataframe(raw_x_test, correct_spelling=False, remove_stopwords=True)\n",
    "    raw_x_train = clean_dataframe(raw_x_train, correct_spelling=False, remove_stopwords=True)\n",
    "\n",
    "    # convert to numpy arrays\n",
    "    raw_x_train = raw_x_train[['clean']].values\n",
    "    raw_x_test = raw_x_test[['clean']].values\n",
    "\n",
    "    for [sentence] in raw_x_train:\n",
    "        x_train.append(sentence)\n",
    "\n",
    "    for [sentence] in raw_x_test:\n",
    "        x_test.append(sentence)\n",
    "\n",
    "    x_train = np.array(x_train)\n",
    "    y_train = np.array(y_train)\n",
    "\n",
    "    x_test = np.array(x_test)\n",
    "    id_test = np.array(id_test)\n",
    "\n",
    "    if is_shuffle:\n",
    "        # shuffle features and labels together via one index permutation\n",
    "        x_train_index = [i for i in range(len(x_train))]\n",
    "        random.shuffle(x_train_index)\n",
    "        x_train = x_train[x_train_index]\n",
    "        y_train = y_train[x_train_index]\n",
    "\n",
    "    gc.collect()\n",
    "\n",
    "    return x_train, y_train, x_test, id_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Amber\\Anaconda3\\envs\\dl\\lib\\site-packages\\ipykernel_launcher.py:409: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n"
     ]
    }
   ],
   "source": [
    "x_train, y_train, x_test, id_test = get_data(is_shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array(['Our Deeds Reason earthquake May ALLAH Forgive',\n",
       "       'Forest fire La Ronge Sask. Canada',\n",
       "       \"All residents asked ishelter place' notified officers. No evacuation shelter place orders expected\",\n",
       "       ..., 'M1.94 01 04 UTC ? 5km Volcano Hawaii.',\n",
       "       'Police investigating e bike collided car Little Portugal. E bike rider suffered non life threatening injuries.',\n",
       "       'The Latest More Homes Razed Northern California Wildfire ABC News'],\n",
       "      dtype='<U153')"
      ]
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[array([[  101, 14008, 18589, ...,     0,     0,     0],\n",
       "        [  101, 14958, 12319, ...,     0,     0,     0],\n",
       "        [  101, 10367, 20679, ...,     0,     0,     0],\n",
       "        ...,\n",
       "        [  101, 50957,   119, ...,     0,     0,     0],\n",
       "        [  101, 13202, 10104, ...,     0,     0,     0],\n",
       "        [  101, 10103, 40883, ...,     0,     0,     0]]),\n",
       " array([[0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        ...,\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0],\n",
       "        [0, 0, 0, ..., 0, 0, 0]])]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "get_encode(x_train, get_token_dict(dict_path), max_len=maxlen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import f1_score, recall_score, precision_score\n",
    "from keras.callbacks import Callback\n",
    "\n",
    "def build_bert_model(X1, X2):\n",
    "    '''\n",
    "    :param X1: encoded token-id matrix\n",
    "    :param X2: encoded segment-id matrix\n",
    "    :return: wordvec -- the BERT output vectors for the encoded inputs\n",
    "    '''\n",
    "    # load the pretrained BERT graph, then run a single forward pass to\n",
    "    # extract fixed feature vectors for the inputs\n",
    "    bert = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n",
    "    wordvec = bert.predict([X1, X2])\n",
    "    return wordvec\n",
    "\n",
    "\n",
    "def boolMap(arr):\n",
    "    if arr > 0.5:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n",
    "\n",
    "# Callback that tracks validation F1 and checkpoints the best weights\n",
    "class Metrics(Callback):\n",
    "    def __init__(self, filepath):\n",
    "        # bug fix: initialise the Keras Callback base class\n",
    "        super(Metrics, self).__init__()\n",
    "        self.file_path = filepath  # where the best weights are saved\n",
    "\n",
    "    def on_train_begin(self, logs=None):\n",
    "        # per-epoch metric history, reset at the start of every fit()\n",
    "        self.val_f1s = []\n",
    "        self.best_val_f1 = 0\n",
    "        self.val_recalls = []\n",
    "        self.val_precisions = []\n",
    "\n",
    "    def on_epoch_end(self, epoch, logs=None):\n",
    "        # NOTE(review): relies on self.validation_data, which newer Keras\n",
    "        # versions no longer populate on callbacks -- confirm the version used.\n",
    "        val_predict = list(map(boolMap, self.model.predict([self.validation_data[0], self.validation_data[1]])))\n",
    "        val_targ = self.validation_data[2]\n",
    "        _val_f1 = f1_score(val_targ, val_predict)\n",
    "        _val_recall = recall_score(val_targ, val_predict)\n",
    "        _val_precision = precision_score(val_targ, val_predict)\n",
    "        self.val_f1s.append(_val_f1)\n",
    "        self.val_recalls.append(_val_recall)\n",
    "        self.val_precisions.append(_val_precision)\n",
    "        print(_val_f1, _val_precision, _val_recall)\n",
    "        print(\"max f1\")\n",
    "        print(max(self.val_f1s))\n",
    "        if _val_f1 > self.best_val_f1:\n",
    "            # only persist weights that improve validation F1\n",
    "            self.model.save_weights(self.file_path, overwrite=True)\n",
    "            self.best_val_f1 = _val_f1\n",
    "            print(\"best f1: {}\".format(self.best_val_f1))\n",
    "        else:\n",
    "            print(\"val f1: {}, but not the best f1\".format(_val_f1))\n",
    "        return\n",
    "\n",
    "def build_model():\n",
    "    \"\"\"Build a Bi-LSTM binary classifier over the pre-computed BERT features.\"\"\"\n",
    "    model = Sequential()\n",
    "    model.add(Bidirectional(LSTM(128)))\n",
    "    model.add(Dense(1, activation=\"sigmoid\"))\n",
    "    model.compile(\n",
    "        loss=\"binary_crossentropy\",\n",
    "        optimizer=Adam(1e-5),\n",
    "        metrics=['accuracy'],\n",
    "    )\n",
    "\n",
    "    return model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Amber\\Anaconda3\\envs\\dl\\lib\\site-packages\\ipykernel_launcher.py:409: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n"
     ]
    }
   ],
   "source": [
    "# Load & clean the data, encode it with the BERT tokenizer, then run the\n",
    "# pretrained BERT model once to extract fixed feature vectors for training.\n",
    "x_train, y_train, x_test, id_test = get_data()\n",
    "token_dict = get_token_dict(dict_path)\n",
    "\n",
    "[X1_train, X2_train] = get_encode(x_train, token_dict)\n",
    "\n",
    "wordvec = build_bert_model(X1_train, X2_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 6851 samples, validate on 762 samples\n",
      "Epoch 1/12\n",
      "6851/6851 [==============================] - 71s 10ms/step - loss: 0.5873 - accuracy: 0.7158 - val_loss: 0.5133 - val_accuracy: 0.7756\n",
      "Epoch 2/12\n",
      "6851/6851 [==============================] - 71s 10ms/step - loss: 0.4874 - accuracy: 0.7851 - val_loss: 0.4542 - val_accuracy: 0.7848\n",
      "Epoch 3/12\n",
      "6851/6851 [==============================] - 71s 10ms/step - loss: 0.4575 - accuracy: 0.7927 - val_loss: 0.4354 - val_accuracy: 0.7940\n",
      "Epoch 4/12\n",
      "6851/6851 [==============================] - 72s 11ms/step - loss: 0.4429 - accuracy: 0.8018 - val_loss: 0.4213 - val_accuracy: 0.7966\n",
      "Epoch 5/12\n",
      "6851/6851 [==============================] - 71s 10ms/step - loss: 0.4322 - accuracy: 0.8089 - val_loss: 0.4165 - val_accuracy: 0.7966\n",
      "Epoch 6/12\n",
      "6851/6851 [==============================] - 69s 10ms/step - loss: 0.4250 - accuracy: 0.8113 - val_loss: 0.4116 - val_accuracy: 0.8005\n",
      "Epoch 7/12\n",
      "6851/6851 [==============================] - 72s 10ms/step - loss: 0.4192 - accuracy: 0.8170 - val_loss: 0.4081 - val_accuracy: 0.8005\n",
      "Epoch 8/12\n",
      "6851/6851 [==============================] - 70s 10ms/step - loss: 0.4136 - accuracy: 0.8197 - val_loss: 0.4071 - val_accuracy: 0.8031\n",
      "Epoch 9/12\n",
      "6851/6851 [==============================] - 70s 10ms/step - loss: 0.4086 - accuracy: 0.8212 - val_loss: 0.4096 - val_accuracy: 0.8031\n",
      "Epoch 10/12\n",
      "6851/6851 [==============================] - 70s 10ms/step - loss: 0.4028 - accuracy: 0.8221 - val_loss: 0.4071 - val_accuracy: 0.8097\n",
      "Epoch 11/12\n",
      "6851/6851 [==============================] - 70s 10ms/step - loss: 0.3987 - accuracy: 0.8240 - val_loss: 0.4079 - val_accuracy: 0.8031\n",
      "Epoch 12/12\n",
      "6851/6851 [==============================] - 70s 10ms/step - loss: 0.3949 - accuracy: 0.8291 - val_loss: 0.4105 - val_accuracy: 0.8005\n"
     ]
    }
   ],
   "source": [
    "# Train the Bi-LSTM classifier on the BERT features and save the model.\n",
    "model = build_model()\n",
    "model.fit(wordvec, y_train, batch_size=16, epochs=12, validation_split=0.1)\n",
    "# NOTE(review): Model.save returns None, so yaml_string is always None --\n",
    "# confirm nothing downstream relies on this variable.\n",
    "yaml_string = model.save(\"test_keras_bert.h5\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4.开始预测 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode the test set and extract its BERT feature vectors\n",
    "[X1_test, X2_test] = get_encode(x_test, token_dict)\n",
    "wordvec_test = build_bert_model(X1_test, X2_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = model.predict(wordvec_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "final_result = list(map(lambda i: 1 if i>0.5 else 0, result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "submit_result = pd.DataFrame({'id': np.squeeze(id_test), 'target': final_result})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "submit_result.to_csv('bert_result_super_preprocess_6.txt', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 5.交叉验证版本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.wrappers.scikit_learn import KerasClassifier\n",
    "from sklearn.model_selection import GridSearchCV"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/10\n",
      "4872/4872 [==============================] - 50s 10ms/step - loss: 0.6182 - accuracy: 0.6769 - val_loss: 0.5452 - val_accuracy: 0.7529\n",
      "Epoch 2/10\n",
      "4872/4872 [==============================] - 50s 10ms/step - loss: 0.5271 - accuracy: 0.7594 - val_loss: 0.4866 - val_accuracy: 0.7709\n",
      "Epoch 3/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4887 - accuracy: 0.7765 - val_loss: 0.4701 - val_accuracy: 0.7709\n",
      "Epoch 4/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.4666 - accuracy: 0.7952 - val_loss: 0.4436 - val_accuracy: 0.7833\n",
      "Epoch 5/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4508 - accuracy: 0.7995 - val_loss: 0.4374 - val_accuracy: 0.7841\n",
      "Epoch 6/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4392 - accuracy: 0.8030 - val_loss: 0.4275 - val_accuracy: 0.8038\n",
      "Epoch 7/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4310 - accuracy: 0.8089 - val_loss: 0.4223 - val_accuracy: 0.8054\n",
      "Epoch 8/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4233 - accuracy: 0.8124 - val_loss: 0.4230 - val_accuracy: 0.7939\n",
      "Epoch 9/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4157 - accuracy: 0.8171 - val_loss: 0.4193 - val_accuracy: 0.8062\n",
      "Epoch 10/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4097 - accuracy: 0.8210 - val_loss: 0.4177 - val_accuracy: 0.8103\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.6188 - accuracy: 0.6794 - val_loss: 0.5676 - val_accuracy: 0.7373\n",
      "Epoch 2/10\n",
      "4872/4872 [==============================] - 50s 10ms/step - loss: 0.5189 - accuracy: 0.7724 - val_loss: 0.4886 - val_accuracy: 0.7677\n",
      "Epoch 3/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4689 - accuracy: 0.7898 - val_loss: 0.4559 - val_accuracy: 0.7742\n",
      "Epoch 4/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.4458 - accuracy: 0.8007 - val_loss: 0.4434 - val_accuracy: 0.7849\n",
      "Epoch 5/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4321 - accuracy: 0.8108 - val_loss: 0.4312 - val_accuracy: 0.7915\n",
      "Epoch 6/10\n",
      "4872/4872 [==============================] - 50s 10ms/step - loss: 0.4231 - accuracy: 0.8132 - val_loss: 0.4240 - val_accuracy: 0.7947\n",
      "Epoch 7/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4156 - accuracy: 0.8144 - val_loss: 0.4225 - val_accuracy: 0.7980\n",
      "Epoch 8/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4096 - accuracy: 0.8190 - val_loss: 0.4183 - val_accuracy: 0.8021\n",
      "Epoch 9/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4037 - accuracy: 0.8229 - val_loss: 0.4145 - val_accuracy: 0.8136\n",
      "Epoch 10/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3988 - accuracy: 0.8261 - val_loss: 0.4151 - val_accuracy: 0.8079\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.6006 - accuracy: 0.6931 - val_loss: 0.5565 - val_accuracy: 0.7463\n",
      "Epoch 2/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.5079 - accuracy: 0.7720 - val_loss: 0.4857 - val_accuracy: 0.7726\n",
      "Epoch 3/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4680 - accuracy: 0.7929 - val_loss: 0.4585 - val_accuracy: 0.7833\n",
      "Epoch 4/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4453 - accuracy: 0.7997 - val_loss: 0.4401 - val_accuracy: 0.7906\n",
      "Epoch 5/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4298 - accuracy: 0.8101 - val_loss: 0.4337 - val_accuracy: 0.7939\n",
      "Epoch 6/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4183 - accuracy: 0.8165 - val_loss: 0.4276 - val_accuracy: 0.7939\n",
      "Epoch 7/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4105 - accuracy: 0.8198 - val_loss: 0.4208 - val_accuracy: 0.8103\n",
      "Epoch 8/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4033 - accuracy: 0.8206 - val_loss: 0.4314 - val_accuracy: 0.7882\n",
      "Epoch 9/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3972 - accuracy: 0.8259 - val_loss: 0.4210 - val_accuracy: 0.8030\n",
      "Epoch 10/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3917 - accuracy: 0.8290 - val_loss: 0.4237 - val_accuracy: 0.7997\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1219 samples\n",
      "Epoch 1/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.6103 - accuracy: 0.6897 - val_loss: 0.5436 - val_accuracy: 0.7564\n",
      "Epoch 2/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.5172 - accuracy: 0.7748 - val_loss: 0.4774 - val_accuracy: 0.7695\n",
      "Epoch 3/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4754 - accuracy: 0.7894 - val_loss: 0.4527 - val_accuracy: 0.7810\n",
      "Epoch 4/10\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.4524 - accuracy: 0.7991 - val_loss: 0.4370 - val_accuracy: 0.7884\n",
      "Epoch 5/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4370 - accuracy: 0.8054 - val_loss: 0.4270 - val_accuracy: 0.7916\n",
      "Epoch 6/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4265 - accuracy: 0.8099 - val_loss: 0.4191 - val_accuracy: 0.7998\n",
      "Epoch 7/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4177 - accuracy: 0.8147 - val_loss: 0.4223 - val_accuracy: 0.8023\n",
      "Epoch 8/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4118 - accuracy: 0.8163 - val_loss: 0.4129 - val_accuracy: 0.8089\n",
      "Epoch 9/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4049 - accuracy: 0.8216 - val_loss: 0.4144 - val_accuracy: 0.8097\n",
      "Epoch 10/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3987 - accuracy: 0.8225 - val_loss: 0.4113 - val_accuracy: 0.8121\n",
      "1522/1522 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1219 samples\n",
      "Epoch 1/10\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.6255 - accuracy: 0.6603 - val_loss: 0.5543 - val_accuracy: 0.7539\n",
      "Epoch 2/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.5248 - accuracy: 0.7668 - val_loss: 0.4894 - val_accuracy: 0.7695\n",
      "Epoch 3/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4815 - accuracy: 0.7794 - val_loss: 0.4595 - val_accuracy: 0.7990\n",
      "Epoch 4/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4612 - accuracy: 0.7876 - val_loss: 0.4462 - val_accuracy: 0.8064\n",
      "Epoch 5/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4466 - accuracy: 0.7972 - val_loss: 0.4394 - val_accuracy: 0.8162\n",
      "Epoch 6/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4365 - accuracy: 0.8028 - val_loss: 0.4358 - val_accuracy: 0.8154\n",
      "Epoch 7/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4292 - accuracy: 0.8085 - val_loss: 0.4344 - val_accuracy: 0.8130\n",
      "Epoch 8/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4218 - accuracy: 0.8108 - val_loss: 0.4330 - val_accuracy: 0.8089\n",
      "Epoch 9/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4146 - accuracy: 0.8177 - val_loss: 0.4321 - val_accuracy: 0.8064\n",
      "Epoch 10/10\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4091 - accuracy: 0.8177 - val_loss: 0.4330 - val_accuracy: 0.8105\n",
      "1522/1522 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.6297 - accuracy: 0.6671 - val_loss: 0.5536 - val_accuracy: 0.7652\n",
      "Epoch 2/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.5296 - accuracy: 0.7623 - val_loss: 0.4786 - val_accuracy: 0.7709\n",
      "Epoch 3/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4829 - accuracy: 0.7810 - val_loss: 0.4532 - val_accuracy: 0.7849\n",
      "Epoch 4/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4603 - accuracy: 0.7956 - val_loss: 0.4350 - val_accuracy: 0.7882\n",
      "Epoch 5/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4472 - accuracy: 0.7997 - val_loss: 0.4262 - val_accuracy: 0.7939\n",
      "Epoch 6/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4369 - accuracy: 0.8064 - val_loss: 0.4191 - val_accuracy: 0.8054\n",
      "Epoch 7/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4294 - accuracy: 0.8108 - val_loss: 0.4170 - val_accuracy: 0.7989\n",
      "Epoch 8/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4231 - accuracy: 0.8147 - val_loss: 0.4152 - val_accuracy: 0.8005\n",
      "Epoch 9/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4171 - accuracy: 0.8149 - val_loss: 0.4143 - val_accuracy: 0.7997\n",
      "Epoch 10/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4106 - accuracy: 0.8183 - val_loss: 0.4129 - val_accuracy: 0.8054\n",
      "Epoch 11/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4046 - accuracy: 0.8237 - val_loss: 0.4112 - val_accuracy: 0.8062\n",
      "Epoch 12/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3990 - accuracy: 0.8292 - val_loss: 0.4127 - val_accuracy: 0.8013\n",
      "Epoch 13/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3935 - accuracy: 0.8317 - val_loss: 0.4129 - val_accuracy: 0.8079\n",
      "Epoch 14/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3884 - accuracy: 0.8333 - val_loss: 0.4134 - val_accuracy: 0.8120\n",
      "Epoch 15/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3837 - accuracy: 0.8350 - val_loss: 0.4154 - val_accuracy: 0.8062\n",
      "Epoch 16/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3775 - accuracy: 0.8405 - val_loss: 0.4171 - val_accuracy: 0.8071\n",
      "Epoch 17/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3723 - accuracy: 0.8438 - val_loss: 0.4191 - val_accuracy: 0.8103\n",
      "Epoch 18/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3668 - accuracy: 0.8444 - val_loss: 0.4192 - val_accuracy: 0.8112\n",
      "Epoch 19/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3609 - accuracy: 0.8469 - val_loss: 0.4218 - val_accuracy: 0.8087\n",
      "Epoch 20/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3556 - accuracy: 0.8522 - val_loss: 0.4265 - val_accuracy: 0.8030\n",
      "Epoch 21/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3499 - accuracy: 0.8559 - val_loss: 0.4275 - val_accuracy: 0.8071\n",
      "Epoch 22/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3433 - accuracy: 0.8596 - val_loss: 0.4294 - val_accuracy: 0.8046\n",
      "Epoch 23/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3375 - accuracy: 0.8617 - val_loss: 0.4335 - val_accuracy: 0.8062\n",
      "Epoch 24/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3305 - accuracy: 0.8647 - val_loss: 0.4378 - val_accuracy: 0.8103\n",
      "Epoch 25/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3231 - accuracy: 0.8719 - val_loss: 0.4389 - val_accuracy: 0.8120\n",
      "Epoch 26/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.3177 - accuracy: 0.8740 - val_loss: 0.4487 - val_accuracy: 0.8054\n",
      "Epoch 27/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3117 - accuracy: 0.8738 - val_loss: 0.4480 - val_accuracy: 0.8054\n",
      "Epoch 28/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3048 - accuracy: 0.8793 - val_loss: 0.4538 - val_accuracy: 0.8046\n",
      "Epoch 29/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2988 - accuracy: 0.8824 - val_loss: 0.4571 - val_accuracy: 0.7972\n",
      "Epoch 30/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2902 - accuracy: 0.8863 - val_loss: 0.4639 - val_accuracy: 0.7939\n",
      "Epoch 31/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2861 - accuracy: 0.8853 - val_loss: 0.4726 - val_accuracy: 0.7956\n",
      "Epoch 32/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.2788 - accuracy: 0.8904 - val_loss: 0.4761 - val_accuracy: 0.7956\n",
      "Epoch 33/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2695 - accuracy: 0.8910 - val_loss: 0.4841 - val_accuracy: 0.7906\n",
      "Epoch 34/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2614 - accuracy: 0.8943 - val_loss: 0.4896 - val_accuracy: 0.7874\n",
      "Epoch 35/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2585 - accuracy: 0.8978 - val_loss: 0.5010 - val_accuracy: 0.7857\n",
      "Epoch 36/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2514 - accuracy: 0.9013 - val_loss: 0.5146 - val_accuracy: 0.7939\n",
      "Epoch 37/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2447 - accuracy: 0.9068 - val_loss: 0.4997 - val_accuracy: 0.7931\n",
      "Epoch 38/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2350 - accuracy: 0.9089 - val_loss: 0.5151 - val_accuracy: 0.7898\n",
      "Epoch 39/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2277 - accuracy: 0.9136 - val_loss: 0.5243 - val_accuracy: 0.7808\n",
      "Epoch 40/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2224 - accuracy: 0.9152 - val_loss: 0.5252 - val_accuracy: 0.7865\n",
      "Epoch 41/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.2169 - accuracy: 0.9187 - val_loss: 0.5406 - val_accuracy: 0.7890\n",
      "Epoch 42/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2067 - accuracy: 0.9234 - val_loss: 0.5421 - val_accuracy: 0.7841\n",
      "Epoch 43/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2000 - accuracy: 0.9253 - val_loss: 0.5649 - val_accuracy: 0.7898\n",
      "Epoch 44/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2025 - accuracy: 0.9255 - val_loss: 0.5741 - val_accuracy: 0.7734\n",
      "Epoch 45/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1910 - accuracy: 0.9275 - val_loss: 0.5712 - val_accuracy: 0.7874\n",
      "Epoch 46/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1842 - accuracy: 0.9337 - val_loss: 0.5879 - val_accuracy: 0.7841\n",
      "Epoch 47/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1795 - accuracy: 0.9351 - val_loss: 0.5953 - val_accuracy: 0.7759\n",
      "Epoch 48/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1723 - accuracy: 0.9397 - val_loss: 0.6136 - val_accuracy: 0.7791\n",
      "Epoch 49/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1671 - accuracy: 0.9399 - val_loss: 0.6198 - val_accuracy: 0.7668\n",
      "Epoch 50/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1627 - accuracy: 0.9438 - val_loss: 0.6441 - val_accuracy: 0.7635\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.6016 - accuracy: 0.7145 - val_loss: 0.5478 - val_accuracy: 0.7570\n",
      "Epoch 2/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.5067 - accuracy: 0.7759 - val_loss: 0.4776 - val_accuracy: 0.7685\n",
      "Epoch 3/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4643 - accuracy: 0.7908 - val_loss: 0.4544 - val_accuracy: 0.7808\n",
      "Epoch 4/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4436 - accuracy: 0.8019 - val_loss: 0.4335 - val_accuracy: 0.7972\n",
      "Epoch 5/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4300 - accuracy: 0.8099 - val_loss: 0.4226 - val_accuracy: 0.8005\n",
      "Epoch 6/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4192 - accuracy: 0.8157 - val_loss: 0.4182 - val_accuracy: 0.8021\n",
      "Epoch 7/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4127 - accuracy: 0.8192 - val_loss: 0.4137 - val_accuracy: 0.8079\n",
      "Epoch 8/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4062 - accuracy: 0.8225 - val_loss: 0.4150 - val_accuracy: 0.8054\n",
      "Epoch 9/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4018 - accuracy: 0.8268 - val_loss: 0.4129 - val_accuracy: 0.8153\n",
      "Epoch 10/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3959 - accuracy: 0.8307 - val_loss: 0.4130 - val_accuracy: 0.8087\n",
      "Epoch 11/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3922 - accuracy: 0.8342 - val_loss: 0.4110 - val_accuracy: 0.8112\n",
      "Epoch 12/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3859 - accuracy: 0.8376 - val_loss: 0.4114 - val_accuracy: 0.8128\n",
      "Epoch 13/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3803 - accuracy: 0.8389 - val_loss: 0.4163 - val_accuracy: 0.8079\n",
      "Epoch 14/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3754 - accuracy: 0.8405 - val_loss: 0.4125 - val_accuracy: 0.8103\n",
      "Epoch 15/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3706 - accuracy: 0.8444 - val_loss: 0.4132 - val_accuracy: 0.8103\n",
      "Epoch 16/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3666 - accuracy: 0.8434 - val_loss: 0.4153 - val_accuracy: 0.8079\n",
      "Epoch 17/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3589 - accuracy: 0.8463 - val_loss: 0.4196 - val_accuracy: 0.8038\n",
      "Epoch 18/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3542 - accuracy: 0.8500 - val_loss: 0.4216 - val_accuracy: 0.8087\n",
      "Epoch 19/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3468 - accuracy: 0.8563 - val_loss: 0.4249 - val_accuracy: 0.8112\n",
      "Epoch 20/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3412 - accuracy: 0.8588 - val_loss: 0.4281 - val_accuracy: 0.8087\n",
      "Epoch 21/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3350 - accuracy: 0.8598 - val_loss: 0.4359 - val_accuracy: 0.8079\n",
      "Epoch 22/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3282 - accuracy: 0.8604 - val_loss: 0.4389 - val_accuracy: 0.8112\n",
      "Epoch 23/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3218 - accuracy: 0.8656 - val_loss: 0.4417 - val_accuracy: 0.8079\n",
      "Epoch 24/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3144 - accuracy: 0.8666 - val_loss: 0.4520 - val_accuracy: 0.7964\n",
      "Epoch 25/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3092 - accuracy: 0.8705 - val_loss: 0.4655 - val_accuracy: 0.7906\n",
      "Epoch 26/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3023 - accuracy: 0.8744 - val_loss: 0.4607 - val_accuracy: 0.8054\n",
      "Epoch 27/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2940 - accuracy: 0.8801 - val_loss: 0.4739 - val_accuracy: 0.8021\n",
      "Epoch 28/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2865 - accuracy: 0.8859 - val_loss: 0.4804 - val_accuracy: 0.7939\n",
      "Epoch 29/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2791 - accuracy: 0.8851 - val_loss: 0.4857 - val_accuracy: 0.7915\n",
      "Epoch 30/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2717 - accuracy: 0.8931 - val_loss: 0.4858 - val_accuracy: 0.7956\n",
      "Epoch 31/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2652 - accuracy: 0.8937 - val_loss: 0.5013 - val_accuracy: 0.7931\n",
      "Epoch 32/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2582 - accuracy: 0.8970 - val_loss: 0.5014 - val_accuracy: 0.7947\n",
      "Epoch 33/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2510 - accuracy: 0.9019 - val_loss: 0.5146 - val_accuracy: 0.7898\n",
      "Epoch 34/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2432 - accuracy: 0.9058 - val_loss: 0.5252 - val_accuracy: 0.7882\n",
      "Epoch 35/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2345 - accuracy: 0.9109 - val_loss: 0.5422 - val_accuracy: 0.7791\n",
      "Epoch 36/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2291 - accuracy: 0.9087 - val_loss: 0.5438 - val_accuracy: 0.7882\n",
      "Epoch 37/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2217 - accuracy: 0.9128 - val_loss: 0.5529 - val_accuracy: 0.7775\n",
      "Epoch 38/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2114 - accuracy: 0.9191 - val_loss: 0.5646 - val_accuracy: 0.7775\n",
      "Epoch 39/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2052 - accuracy: 0.9222 - val_loss: 0.5693 - val_accuracy: 0.7824\n",
      "Epoch 40/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1961 - accuracy: 0.9251 - val_loss: 0.5773 - val_accuracy: 0.7800\n",
      "Epoch 41/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1918 - accuracy: 0.9304 - val_loss: 0.5842 - val_accuracy: 0.7718\n",
      "Epoch 42/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1861 - accuracy: 0.9298 - val_loss: 0.5942 - val_accuracy: 0.7783\n",
      "Epoch 43/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1768 - accuracy: 0.9351 - val_loss: 0.6099 - val_accuracy: 0.7726\n",
      "Epoch 44/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1684 - accuracy: 0.9386 - val_loss: 0.6298 - val_accuracy: 0.7709\n",
      "Epoch 45/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1661 - accuracy: 0.9405 - val_loss: 0.6281 - val_accuracy: 0.7750\n",
      "Epoch 46/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1561 - accuracy: 0.9438 - val_loss: 0.6446 - val_accuracy: 0.7718\n",
      "Epoch 47/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1494 - accuracy: 0.9460 - val_loss: 0.6600 - val_accuracy: 0.7783\n",
      "Epoch 48/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1416 - accuracy: 0.9483 - val_loss: 0.6788 - val_accuracy: 0.7660\n",
      "Epoch 49/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1402 - accuracy: 0.9522 - val_loss: 0.6688 - val_accuracy: 0.7767\n",
      "Epoch 50/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1411 - accuracy: 0.9514 - val_loss: 0.6583 - val_accuracy: 0.7529\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.5987 - accuracy: 0.6892 - val_loss: 0.5463 - val_accuracy: 0.7504\n",
      "Epoch 2/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.4985 - accuracy: 0.7878 - val_loss: 0.4840 - val_accuracy: 0.7644\n",
      "Epoch 3/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4585 - accuracy: 0.7974 - val_loss: 0.4458 - val_accuracy: 0.7734\n",
      "Epoch 4/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4369 - accuracy: 0.8095 - val_loss: 0.4335 - val_accuracy: 0.7939\n",
      "Epoch 5/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4239 - accuracy: 0.8169 - val_loss: 0.4238 - val_accuracy: 0.7989\n",
      "Epoch 6/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4142 - accuracy: 0.8235 - val_loss: 0.4179 - val_accuracy: 0.8021\n",
      "Epoch 7/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4066 - accuracy: 0.8261 - val_loss: 0.4188 - val_accuracy: 0.7997\n",
      "Epoch 8/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.4007 - accuracy: 0.8298 - val_loss: 0.4167 - val_accuracy: 0.8021\n",
      "Epoch 9/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3937 - accuracy: 0.8350 - val_loss: 0.4121 - val_accuracy: 0.8079\n",
      "Epoch 10/50\n",
      "4872/4872 [==============================] - 46s 10ms/step - loss: 0.3881 - accuracy: 0.8387 - val_loss: 0.4115 - val_accuracy: 0.8079\n",
      "Epoch 11/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3826 - accuracy: 0.8399 - val_loss: 0.4117 - val_accuracy: 0.8062\n",
      "Epoch 12/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3781 - accuracy: 0.8424 - val_loss: 0.4130 - val_accuracy: 0.8062\n",
      "Epoch 13/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3725 - accuracy: 0.8426 - val_loss: 0.4230 - val_accuracy: 0.7997\n",
      "Epoch 14/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3667 - accuracy: 0.8471 - val_loss: 0.4238 - val_accuracy: 0.7939\n",
      "Epoch 15/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3613 - accuracy: 0.8516 - val_loss: 0.4199 - val_accuracy: 0.8030\n",
      "Epoch 16/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3559 - accuracy: 0.8537 - val_loss: 0.4216 - val_accuracy: 0.7997\n",
      "Epoch 17/50\n",
      "4872/4872 [==============================] - 46s 10ms/step - loss: 0.3509 - accuracy: 0.8573 - val_loss: 0.4263 - val_accuracy: 0.8054\n",
      "Epoch 18/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3454 - accuracy: 0.8610 - val_loss: 0.4261 - val_accuracy: 0.8005\n",
      "Epoch 19/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3394 - accuracy: 0.8639 - val_loss: 0.4295 - val_accuracy: 0.8005\n",
      "Epoch 20/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3338 - accuracy: 0.8674 - val_loss: 0.4357 - val_accuracy: 0.7989\n",
      "Epoch 21/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3270 - accuracy: 0.8701 - val_loss: 0.4388 - val_accuracy: 0.7972\n",
      "Epoch 22/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3227 - accuracy: 0.8754 - val_loss: 0.4445 - val_accuracy: 0.8013\n",
      "Epoch 23/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3174 - accuracy: 0.8738 - val_loss: 0.4427 - val_accuracy: 0.7980\n",
      "Epoch 24/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3109 - accuracy: 0.8764 - val_loss: 0.4434 - val_accuracy: 0.7947\n",
      "Epoch 25/50\n",
      "4872/4872 [==============================] - 46s 10ms/step - loss: 0.3048 - accuracy: 0.8807 - val_loss: 0.4480 - val_accuracy: 0.7890\n",
      "Epoch 26/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.3004 - accuracy: 0.8830 - val_loss: 0.4499 - val_accuracy: 0.7980\n",
      "Epoch 27/50\n",
      "4872/4872 [==============================] - 46s 10ms/step - loss: 0.2922 - accuracy: 0.8824 - val_loss: 0.4594 - val_accuracy: 0.7947\n",
      "Epoch 28/50\n",
      "4872/4872 [==============================] - 46s 10ms/step - loss: 0.2874 - accuracy: 0.8900 - val_loss: 0.4574 - val_accuracy: 0.7964\n",
      "Epoch 29/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2784 - accuracy: 0.8920 - val_loss: 0.4636 - val_accuracy: 0.7947\n",
      "Epoch 30/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2743 - accuracy: 0.8935 - val_loss: 0.4712 - val_accuracy: 0.7865\n",
      "Epoch 31/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2656 - accuracy: 0.8974 - val_loss: 0.4761 - val_accuracy: 0.7874\n",
      "Epoch 32/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2595 - accuracy: 0.9013 - val_loss: 0.4894 - val_accuracy: 0.7841\n",
      "Epoch 33/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2523 - accuracy: 0.9029 - val_loss: 0.4886 - val_accuracy: 0.7833\n",
      "Epoch 34/50\n",
      "4872/4872 [==============================] - 46s 9ms/step - loss: 0.2474 - accuracy: 0.9044 - val_loss: 0.4979 - val_accuracy: 0.7874\n",
      "Epoch 35/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2395 - accuracy: 0.9091 - val_loss: 0.5399 - val_accuracy: 0.7759\n",
      "Epoch 36/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2363 - accuracy: 0.9099 - val_loss: 0.5103 - val_accuracy: 0.7923\n",
      "Epoch 37/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2272 - accuracy: 0.9161 - val_loss: 0.5146 - val_accuracy: 0.7890\n",
      "Epoch 38/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2193 - accuracy: 0.9181 - val_loss: 0.5230 - val_accuracy: 0.7882\n",
      "Epoch 39/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2127 - accuracy: 0.9204 - val_loss: 0.5352 - val_accuracy: 0.7882\n",
      "Epoch 40/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2099 - accuracy: 0.9208 - val_loss: 0.5549 - val_accuracy: 0.7783\n",
      "Epoch 41/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2000 - accuracy: 0.9241 - val_loss: 0.5479 - val_accuracy: 0.7865\n",
      "Epoch 42/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1935 - accuracy: 0.9290 - val_loss: 0.5505 - val_accuracy: 0.7857\n",
      "Epoch 43/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1872 - accuracy: 0.9323 - val_loss: 0.5608 - val_accuracy: 0.7882\n",
      "Epoch 44/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1821 - accuracy: 0.9317 - val_loss: 0.5812 - val_accuracy: 0.7759\n",
      "Epoch 45/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1746 - accuracy: 0.9366 - val_loss: 0.5749 - val_accuracy: 0.7898\n",
      "Epoch 46/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1691 - accuracy: 0.9384 - val_loss: 0.5976 - val_accuracy: 0.7808\n",
      "Epoch 47/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1635 - accuracy: 0.9405 - val_loss: 0.5938 - val_accuracy: 0.7849\n",
      "Epoch 48/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1565 - accuracy: 0.9431 - val_loss: 0.6175 - val_accuracy: 0.7841\n",
      "Epoch 49/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1526 - accuracy: 0.9458 - val_loss: 0.6096 - val_accuracy: 0.7800\n",
      "Epoch 50/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1475 - accuracy: 0.9481 - val_loss: 0.6272 - val_accuracy: 0.7841\n",
      "1523/1523 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1219 samples\n",
      "Epoch 1/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.6122 - accuracy: 0.6796 - val_loss: 0.5431 - val_accuracy: 0.7588\n",
      "Epoch 2/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.5126 - accuracy: 0.7722 - val_loss: 0.4775 - val_accuracy: 0.7793\n",
      "Epoch 3/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4724 - accuracy: 0.7855 - val_loss: 0.4481 - val_accuracy: 0.7769\n",
      "Epoch 4/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.4522 - accuracy: 0.7960 - val_loss: 0.4318 - val_accuracy: 0.7957\n",
      "Epoch 5/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4386 - accuracy: 0.8021 - val_loss: 0.4223 - val_accuracy: 0.8064\n",
      "Epoch 6/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4285 - accuracy: 0.8073 - val_loss: 0.4156 - val_accuracy: 0.8072\n",
      "Epoch 7/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4194 - accuracy: 0.8130 - val_loss: 0.4104 - val_accuracy: 0.8171\n",
      "Epoch 8/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4121 - accuracy: 0.8155 - val_loss: 0.4087 - val_accuracy: 0.8121\n",
      "Epoch 9/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4065 - accuracy: 0.8229 - val_loss: 0.4064 - val_accuracy: 0.8097\n",
      "Epoch 10/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3992 - accuracy: 0.8249 - val_loss: 0.4059 - val_accuracy: 0.8105\n",
      "Epoch 11/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3940 - accuracy: 0.8284 - val_loss: 0.4037 - val_accuracy: 0.8097\n",
      "Epoch 12/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3883 - accuracy: 0.8319 - val_loss: 0.4042 - val_accuracy: 0.8097\n",
      "Epoch 13/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3825 - accuracy: 0.8344 - val_loss: 0.4038 - val_accuracy: 0.8130\n",
      "Epoch 14/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.3767 - accuracy: 0.8387 - val_loss: 0.4045 - val_accuracy: 0.8097\n",
      "Epoch 15/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3721 - accuracy: 0.8389 - val_loss: 0.4095 - val_accuracy: 0.8080\n",
      "Epoch 16/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3674 - accuracy: 0.8446 - val_loss: 0.4105 - val_accuracy: 0.8064\n",
      "Epoch 17/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3596 - accuracy: 0.8483 - val_loss: 0.4097 - val_accuracy: 0.8130\n",
      "Epoch 18/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3552 - accuracy: 0.8510 - val_loss: 0.4112 - val_accuracy: 0.8023\n",
      "Epoch 19/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3478 - accuracy: 0.8528 - val_loss: 0.4181 - val_accuracy: 0.8064\n",
      "Epoch 20/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3414 - accuracy: 0.8578 - val_loss: 0.4192 - val_accuracy: 0.8039\n",
      "Epoch 21/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3358 - accuracy: 0.8600 - val_loss: 0.4227 - val_accuracy: 0.8031\n",
      "Epoch 22/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3282 - accuracy: 0.8641 - val_loss: 0.4237 - val_accuracy: 0.7957\n",
      "Epoch 23/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.3216 - accuracy: 0.8672 - val_loss: 0.4298 - val_accuracy: 0.8064\n",
      "Epoch 24/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3139 - accuracy: 0.8727 - val_loss: 0.4343 - val_accuracy: 0.8105\n",
      "Epoch 25/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3064 - accuracy: 0.8754 - val_loss: 0.4360 - val_accuracy: 0.8031\n",
      "Epoch 26/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2990 - accuracy: 0.8764 - val_loss: 0.4481 - val_accuracy: 0.7900\n",
      "Epoch 27/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2896 - accuracy: 0.8830 - val_loss: 0.4493 - val_accuracy: 0.8031\n",
      "Epoch 28/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2830 - accuracy: 0.8851 - val_loss: 0.4541 - val_accuracy: 0.7998\n",
      "Epoch 29/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2759 - accuracy: 0.8906 - val_loss: 0.4647 - val_accuracy: 0.8064\n",
      "Epoch 30/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2670 - accuracy: 0.8931 - val_loss: 0.4713 - val_accuracy: 0.7818\n",
      "Epoch 31/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2566 - accuracy: 0.8990 - val_loss: 0.4761 - val_accuracy: 0.7957\n",
      "Epoch 32/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2506 - accuracy: 0.9009 - val_loss: 0.4885 - val_accuracy: 0.7785\n",
      "Epoch 33/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2421 - accuracy: 0.9062 - val_loss: 0.4983 - val_accuracy: 0.7810\n",
      "Epoch 34/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2347 - accuracy: 0.9093 - val_loss: 0.5020 - val_accuracy: 0.7933\n",
      "Epoch 35/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2277 - accuracy: 0.9132 - val_loss: 0.5080 - val_accuracy: 0.7884\n",
      "Epoch 36/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2214 - accuracy: 0.9144 - val_loss: 0.5235 - val_accuracy: 0.7834\n",
      "Epoch 37/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2144 - accuracy: 0.9191 - val_loss: 0.5147 - val_accuracy: 0.7826\n",
      "Epoch 38/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2063 - accuracy: 0.9234 - val_loss: 0.5369 - val_accuracy: 0.7703\n",
      "Epoch 39/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1999 - accuracy: 0.9239 - val_loss: 0.5437 - val_accuracy: 0.7687\n",
      "Epoch 40/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1922 - accuracy: 0.9271 - val_loss: 0.5473 - val_accuracy: 0.7752\n",
      "Epoch 41/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1856 - accuracy: 0.9306 - val_loss: 0.5551 - val_accuracy: 0.7826\n",
      "Epoch 42/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1801 - accuracy: 0.9341 - val_loss: 0.5634 - val_accuracy: 0.7777\n",
      "Epoch 43/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1768 - accuracy: 0.9360 - val_loss: 0.5721 - val_accuracy: 0.7810\n",
      "Epoch 44/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1674 - accuracy: 0.9411 - val_loss: 0.5843 - val_accuracy: 0.7687\n",
      "Epoch 45/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1618 - accuracy: 0.9417 - val_loss: 0.5980 - val_accuracy: 0.7777\n",
      "Epoch 46/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1569 - accuracy: 0.9438 - val_loss: 0.6047 - val_accuracy: 0.7621\n",
      "Epoch 47/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1520 - accuracy: 0.9442 - val_loss: 0.6307 - val_accuracy: 0.7793\n",
      "Epoch 48/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1457 - accuracy: 0.9458 - val_loss: 0.6371 - val_accuracy: 0.7769\n",
      "Epoch 49/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1383 - accuracy: 0.9505 - val_loss: 0.6542 - val_accuracy: 0.7695\n",
      "Epoch 50/50\n",
      "4872/4872 [==============================] - 49s 10ms/step - loss: 0.1350 - accuracy: 0.9520 - val_loss: 0.6439 - val_accuracy: 0.7744\n",
      "1522/1522 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1219 samples\n",
      "Epoch 1/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.6203 - accuracy: 0.6792 - val_loss: 0.5595 - val_accuracy: 0.7662\n",
      "Epoch 2/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.5242 - accuracy: 0.7703 - val_loss: 0.4960 - val_accuracy: 0.7703\n",
      "Epoch 3/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4817 - accuracy: 0.7808 - val_loss: 0.4598 - val_accuracy: 0.8023\n",
      "Epoch 4/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4601 - accuracy: 0.7911 - val_loss: 0.4424 - val_accuracy: 0.8138\n",
      "Epoch 5/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4460 - accuracy: 0.7991 - val_loss: 0.4340 - val_accuracy: 0.8146\n",
      "Epoch 6/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4354 - accuracy: 0.8052 - val_loss: 0.4326 - val_accuracy: 0.8097\n",
      "Epoch 7/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4283 - accuracy: 0.8079 - val_loss: 0.4278 - val_accuracy: 0.8171\n",
      "Epoch 8/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4209 - accuracy: 0.8128 - val_loss: 0.4277 - val_accuracy: 0.8146\n",
      "Epoch 9/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4135 - accuracy: 0.8173 - val_loss: 0.4291 - val_accuracy: 0.8162\n",
      "Epoch 10/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4081 - accuracy: 0.8218 - val_loss: 0.4295 - val_accuracy: 0.8080\n",
      "Epoch 11/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4024 - accuracy: 0.8251 - val_loss: 0.4351 - val_accuracy: 0.8105\n",
      "Epoch 12/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3965 - accuracy: 0.8276 - val_loss: 0.4315 - val_accuracy: 0.8105\n",
      "Epoch 13/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3904 - accuracy: 0.8342 - val_loss: 0.4345 - val_accuracy: 0.8179\n",
      "Epoch 14/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3857 - accuracy: 0.8362 - val_loss: 0.4349 - val_accuracy: 0.8113\n",
      "Epoch 15/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3802 - accuracy: 0.8387 - val_loss: 0.4350 - val_accuracy: 0.8154\n",
      "Epoch 16/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3743 - accuracy: 0.8424 - val_loss: 0.4438 - val_accuracy: 0.8121\n",
      "Epoch 17/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3687 - accuracy: 0.8424 - val_loss: 0.4432 - val_accuracy: 0.8097\n",
      "Epoch 18/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3645 - accuracy: 0.8479 - val_loss: 0.4484 - val_accuracy: 0.8056\n",
      "Epoch 19/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3576 - accuracy: 0.8530 - val_loss: 0.4570 - val_accuracy: 0.8007\n",
      "Epoch 20/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3519 - accuracy: 0.8528 - val_loss: 0.4540 - val_accuracy: 0.7998\n",
      "Epoch 21/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3460 - accuracy: 0.8578 - val_loss: 0.4530 - val_accuracy: 0.8007\n",
      "Epoch 22/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3385 - accuracy: 0.8606 - val_loss: 0.4571 - val_accuracy: 0.7982\n",
      "Epoch 23/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3337 - accuracy: 0.8627 - val_loss: 0.4699 - val_accuracy: 0.7966\n",
      "Epoch 24/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3274 - accuracy: 0.8686 - val_loss: 0.4698 - val_accuracy: 0.7998\n",
      "Epoch 25/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3195 - accuracy: 0.8688 - val_loss: 0.4689 - val_accuracy: 0.7949\n",
      "Epoch 26/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3135 - accuracy: 0.8752 - val_loss: 0.4726 - val_accuracy: 0.7941\n",
      "Epoch 27/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.3047 - accuracy: 0.8785 - val_loss: 0.4833 - val_accuracy: 0.7908\n",
      "Epoch 28/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2972 - accuracy: 0.8824 - val_loss: 0.4887 - val_accuracy: 0.7908\n",
      "Epoch 29/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2919 - accuracy: 0.8851 - val_loss: 0.4880 - val_accuracy: 0.7892\n",
      "Epoch 30/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2846 - accuracy: 0.8885 - val_loss: 0.4997 - val_accuracy: 0.7834\n",
      "Epoch 31/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2779 - accuracy: 0.8910 - val_loss: 0.4988 - val_accuracy: 0.7867\n",
      "Epoch 32/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2685 - accuracy: 0.8996 - val_loss: 0.5080 - val_accuracy: 0.7884\n",
      "Epoch 33/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2634 - accuracy: 0.8990 - val_loss: 0.5184 - val_accuracy: 0.7810\n",
      "Epoch 34/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2554 - accuracy: 0.9031 - val_loss: 0.5312 - val_accuracy: 0.7810\n",
      "Epoch 35/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2473 - accuracy: 0.9095 - val_loss: 0.5231 - val_accuracy: 0.7826\n",
      "Epoch 36/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2417 - accuracy: 0.9091 - val_loss: 0.5417 - val_accuracy: 0.7842\n",
      "Epoch 37/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2353 - accuracy: 0.9117 - val_loss: 0.5404 - val_accuracy: 0.7801\n",
      "Epoch 38/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2279 - accuracy: 0.9142 - val_loss: 0.5420 - val_accuracy: 0.7818\n",
      "Epoch 39/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.2173 - accuracy: 0.9210 - val_loss: 0.5597 - val_accuracy: 0.7818\n",
      "Epoch 40/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2106 - accuracy: 0.9228 - val_loss: 0.5953 - val_accuracy: 0.7670\n",
      "Epoch 41/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2074 - accuracy: 0.9236 - val_loss: 0.5798 - val_accuracy: 0.7662\n",
      "Epoch 42/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.2020 - accuracy: 0.9294 - val_loss: 0.5817 - val_accuracy: 0.7793\n",
      "Epoch 43/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1949 - accuracy: 0.9302 - val_loss: 0.5903 - val_accuracy: 0.7711\n",
      "Epoch 44/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1848 - accuracy: 0.9347 - val_loss: 0.6178 - val_accuracy: 0.7629\n",
      "Epoch 45/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1788 - accuracy: 0.9374 - val_loss: 0.6208 - val_accuracy: 0.7629\n",
      "Epoch 46/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1740 - accuracy: 0.9409 - val_loss: 0.6330 - val_accuracy: 0.7670\n",
      "Epoch 47/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1713 - accuracy: 0.9407 - val_loss: 0.6329 - val_accuracy: 0.7670\n",
      "Epoch 48/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1609 - accuracy: 0.9462 - val_loss: 0.6461 - val_accuracy: 0.7719\n",
      "Epoch 49/50\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.1537 - accuracy: 0.9481 - val_loss: 0.6681 - val_accuracy: 0.7555\n",
      "Epoch 50/50\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.1517 - accuracy: 0.9489 - val_loss: 0.6756 - val_accuracy: 0.7555\n",
      "1522/1522 [==============================] - 3s 2ms/step\n",
      "Train on 4872 samples, validate on 1218 samples\n",
      "Epoch 1/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.6222 - accuracy: 0.6853 - val_loss: 0.5588 - val_accuracy: 0.7553\n",
      "Epoch 2/100\n",
      "4872/4872 [==============================] - 47s 10ms/step - loss: 0.5251 - accuracy: 0.7687 - val_loss: 0.4869 - val_accuracy: 0.7660\n",
      "Epoch 3/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4823 - accuracy: 0.7824 - val_loss: 0.4577 - val_accuracy: 0.7750\n",
      "Epoch 4/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4604 - accuracy: 0.7911 - val_loss: 0.4451 - val_accuracy: 0.7841\n",
      "Epoch 5/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4461 - accuracy: 0.7978 - val_loss: 0.4305 - val_accuracy: 0.7931\n",
      "Epoch 6/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4355 - accuracy: 0.8062 - val_loss: 0.4315 - val_accuracy: 0.7964\n",
      "Epoch 7/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4274 - accuracy: 0.8110 - val_loss: 0.4218 - val_accuracy: 0.7980\n",
      "Epoch 8/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4213 - accuracy: 0.8099 - val_loss: 0.4226 - val_accuracy: 0.7980\n",
      "Epoch 9/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4140 - accuracy: 0.8169 - val_loss: 0.4213 - val_accuracy: 0.7972\n",
      "Epoch 10/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4084 - accuracy: 0.8208 - val_loss: 0.4209 - val_accuracy: 0.7956\n",
      "Epoch 11/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.4025 - accuracy: 0.8245 - val_loss: 0.4238 - val_accuracy: 0.8062\n",
      "Epoch 12/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3981 - accuracy: 0.8276 - val_loss: 0.4204 - val_accuracy: 0.8013\n",
      "Epoch 13/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3904 - accuracy: 0.8296 - val_loss: 0.4239 - val_accuracy: 0.8005\n",
      "Epoch 14/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3853 - accuracy: 0.8305 - val_loss: 0.4237 - val_accuracy: 0.8021\n",
      "Epoch 15/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3801 - accuracy: 0.8339 - val_loss: 0.4249 - val_accuracy: 0.8054\n",
      "Epoch 16/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3750 - accuracy: 0.8362 - val_loss: 0.4289 - val_accuracy: 0.8030\n",
      "Epoch 17/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3693 - accuracy: 0.8422 - val_loss: 0.4285 - val_accuracy: 0.8005\n",
      "Epoch 18/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3620 - accuracy: 0.8467 - val_loss: 0.4318 - val_accuracy: 0.8038\n",
      "Epoch 19/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3572 - accuracy: 0.8491 - val_loss: 0.4353 - val_accuracy: 0.7980\n",
      "Epoch 20/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3520 - accuracy: 0.8506 - val_loss: 0.4390 - val_accuracy: 0.7972\n",
      "Epoch 21/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3447 - accuracy: 0.8571 - val_loss: 0.4462 - val_accuracy: 0.7939\n",
      "Epoch 22/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3400 - accuracy: 0.8578 - val_loss: 0.4468 - val_accuracy: 0.7964\n",
      "Epoch 23/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3321 - accuracy: 0.8612 - val_loss: 0.4524 - val_accuracy: 0.7890\n",
      "Epoch 24/100\n",
      "4872/4872 [==============================] - 48s 10ms/step - loss: 0.3269 - accuracy: 0.8658 - val_loss: 0.4537 - val_accuracy: 0.8005\n",
      "Epoch 25/100\n",
      "  96/4872 [..............................] - ETA: 47s - loss: 0.3359 - accuracy: 0.8854"
     ]
    }
   ],
   "source": [
    "# Wrap the Keras model factory so scikit-learn can treat it as an estimator.\n",
    "# NOTE(review): build_model is defined in an earlier cell, not visible here.\n",
    "cross_model = KerasClassifier(build_fn=build_model)\n",
    "\n",
    "# Hyperparameter grid: only batch_size and epochs are searched.\n",
    "batch_size = [16, 32, 64, 128]\n",
    "epochs = [10, 50, 100]\n",
    "# Fraction of the training data Keras holds out for validation on each fit\n",
    "# (the 'validate on 1218/1219 samples' lines in the output above).\n",
    "validation_split=0.2\n",
    "param_grid = dict(batch_size=batch_size, epochs=epochs)\n",
    "\n",
    "# n_jobs=1: Keras/TF sessions are generally not safe to fit in parallel workers.\n",
    "grid = GridSearchCV(estimator=cross_model, param_grid=param_grid, n_jobs=1)\n",
    "# Extra keyword args (validation_split) are forwarded through GridSearchCV.fit\n",
    "# to KerasClassifier.fit and on to keras Model.fit.\n",
    "grid_result = grid.fit(wordvec, y=y_train, validation_split=validation_split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(7613, 1)"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: one label per training sample — (7613, 1).\n",
    "y_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): pickle is already imported in the first cell; this re-import\n",
    "# is redundant (harmless, but imports belong in the top import cell).\n",
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the BERT features, labels, and test ids so later runs can reload\n",
    "# them instead of re-encoding. Writes the same four files, in the same order,\n",
    "# as the previous copy-pasted version.\n",
    "for filename, obj in [(\"x_train.pkl\", wordvec),\n",
    "                      (\"y_train.pkl\", y_train),\n",
    "                      (\"x_test.pkl\", wordvec_test),\n",
    "                      (\"id_test.pkl\", id_test)]:\n",
    "    with open(filename, 'wb') as f:\n",
    "        pickle.dump(obj, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(7613, 100, 768)"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 7613 samples x 100 tokens (maxlen) x 768-dim BERT vectors.\n",
    "wordvec.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
