{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\h5py\\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import jieba\n",
    "from collections import Counter\n",
    "import pandas as pd\n",
    "from numpy import nan\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import random as rn\n",
    "import os\n",
    "from keras import backend as K"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Reproducibility setup: fix every seed source (Python hash, NumPy, random,\n",
     "# TensorFlow) so Keras runs are repeatable across restarts.\n",
     "os.environ['PYTHONHASHSEED'] = '0'\n",
     "\n",
     "np.random.seed(42)\n",
     "\n",
     "rn.seed(12345)\n",
     "\n",
     "# Single-threaded execution avoids nondeterminism from thread scheduling (TF 1.x API).\n",
     "session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n",
     "\n",
     "tf.set_random_seed(1234)\n",
     "\n",
     "# Install the seeded, single-threaded session as the Keras backend session.\n",
     "sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n",
     "K.set_session(sess)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## process_tweet\n",
    "## 标注说明\n",
    "The possible stance labels are:\n",
    "1. FAVOR: We can infer from the tweet that the tweeter supports the target (e.g., directly or indirectly by supporting someone/something, by opposing or criticizing someone/something opposed to the target, or by echoing the stance of somebody else).\n",
    "2. AGAINST: We can infer from the tweet that the tweeter is against the target (e.g., directly or indirectly by opposing or criticizing someone/something, by supporting someone/something opposed to the target, or by echoing the stance of somebody else).\n",
    "3. NONE: none of the above.\n",
    "\n",
    "The possible 'opinion towards' labels are:\n",
    "1. TARGET: The tweet explicitly expresses opinion about the target, a part of the target, or an aspect of the target.\n",
     "2. OTHER: The tweet does NOT express opinion about the target but it HAS opinion about something or someone other than the target.\n",
    "3. NO ONE: The tweet is not explicitly expressing opinion. (For example, the tweet is simply giving information.)\n",
    "\n",
    "The possible sentiment labels are:\n",
    "1. POSITIVE: the speaker is using positive language, for example, expressions of support, admiration, positive attitude, forgiveness, fostering, success, positive emotional state (happiness, optimism, pride, etc.)\n",
    "2. NEGATIVE: the speaker is using negative language, for example, expressions of criticism, judgment, negative attitude, questioning validity/competence, failure, negative emotional state (anger, frustration, sadness, anxiety, etc.)\n",
    "3. NEITHER: none of the above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "train_tweet = pd.read_table('data/semeval/trainingdata-all-annotations.txt',encoding='gbk')\n",
    "test_tweet = pd.read_table('data/semeval/testdata-taskA-all-annotations.txt',encoding='gbk')\n",
    "trial_tweet = pd.read_table('data/semeval/trialdata-all-annotations.txt',encoding='gbk')\n",
    "train_tweet = train_tweet.append(trial_tweet,ignore_index=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from nltk.stem.porter import PorterStemmer\n",
    "def preprocess_word(word):\n",
    "    # Remove punctuation\n",
    "    word = word.strip('\\'\"?!,.():;')\n",
    "    # Convert more than 2 letter repetitions to 2 letter\n",
    "    # funnnnny --> funny\n",
    "    word = re.sub(r'(.)\\1+', r'\\1\\1', word)\n",
    "    # Remove - & '\n",
    "    word = re.sub(r'(-|\\')', '', word)\n",
    "    return word\n",
    "\n",
    "\n",
    "def is_valid_word(word):\n",
    "    # Check if word begins with an alphabet\n",
    "    return (re.search(r'^[a-zA-Z][a-z0-9A-Z\\._]*$', word) is not None)\n",
    "\n",
    "\n",
    "def handle_emojis(tweet):\n",
    "    # Smile -- :), : ), :-), (:, ( :, (-:, :')\n",
    "    tweet = re.sub(r'(:\\s?\\)|:-\\)|\\(\\s?:|\\(-:|:\\'\\))', ' EMO_POS ', tweet)\n",
    "    # Laugh -- :D, : D, :-D, xD, x-D, XD, X-D\n",
    "    tweet = re.sub(r'(:\\s?D|:-D|x-?D|X-?D)', ' EMO_POS ', tweet)\n",
    "    # Love -- <3, :*\n",
    "    tweet = re.sub(r'(<3|:\\*)', ' EMO_POS ', tweet)\n",
    "    # Wink -- ;-), ;), ;-D, ;D, (;,  (-;\n",
    "    tweet = re.sub(r'(;-?\\)|;-?D|\\(-?;)', ' EMO_POS ', tweet)\n",
    "    # Sad -- :-(, : (, :(, ):, )-:\n",
    "    tweet = re.sub(r'(:\\s?\\(|:-\\(|\\)\\s?:|\\)-:)', ' EMO_NEG ', tweet)\n",
    "    # Cry -- :,(, :'(, :\"(\n",
    "    tweet = re.sub(r'(:,\\(|:\\'\\(|:\"\\()', ' EMO_NEG ', tweet)\n",
    "    return tweet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from nltk.tokenize import word_tokenize\n",
    "def pre_process_tweet(tweet):\n",
    "    processed_tweet = []\n",
    "    # Convert to lower case\n",
    "    tweet = tweet.lower()\n",
    "    # Replaces URLs with the word URL\n",
    "    tweet = re.sub(r'((www\\.[\\S]+)|(https?://[\\S]+))', ' URL ', tweet)\n",
    "    # Replace @handle with the word USER_MENTION\n",
    "    tweet = re.sub(r'@[\\S]+', 'USER_MENTION', tweet)\n",
    "    # Replaces #hashtag with hashtag\n",
    "    tweet = re.sub(r'#(\\S+)', r' \\1 ', tweet)\n",
    "    # Remove RT (retweet)\n",
    "    tweet = re.sub(r'\\brt\\b', '', tweet)\n",
    "    # Replace 2+ dots with space\n",
    "    tweet = re.sub(r'\\.{2,}', ' ', tweet)\n",
    "    # Strip space, \" and ' from tweet\n",
    "    tweet = tweet.strip(' \"\\'')\n",
    "    # Replace emojis with either EMO_POS or EMO_NEG\n",
    "    tweet = handle_emojis(tweet)\n",
    "    # Replace multiple spaces with a single space\n",
    "    tweet = re.sub(r'\\s+', ' ', tweet)\n",
    "    processed_tweet = word_tokenize(tweet)\n",
    "#     words = tweet.split()\n",
    "#     for word in words:\n",
    "#         word = preprocess_word(word)\n",
    "#         if is_valid_word(word):\n",
    "#             if use_stemmer:\n",
    "#                 word = str(porter_stemmer.stem(word))\n",
    "#             processed_tweet.append(word)\n",
    "\n",
    "    return ' '.join(processed_tweet)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Map stance strings to integer class ids; NaN (unlabeled) is treated as NONE.\n",
     "label_dict_tweet =  {nan:0,'NONE':0,'FAVOR':1,'AGAINST':2}\n",
     "train_tweet['WORDS'] = train_tweet['Tweet'].apply(lambda x:pre_process_tweet(x))\n",
     "test_tweet['WORDS'] = test_tweet['Tweet'].apply(lambda x:pre_process_tweet(x))\n",
     "train_tweet['LABEL'] = train_tweet['Stance'].apply(lambda x:label_dict_tweet[x])\n",
     "test_tweet['LABEL'] = test_tweet['Stance'].apply(lambda x:label_dict_tweet[x])\n",
     "# Lower-case targets so they match the string literals used for slicing below.\n",
     "train_tweet['TARGET'] = train_tweet['Target'].apply(lambda x:x.lower())\n",
     "test_tweet['TARGET'] = test_tweet['Target'].apply(lambda x:x.lower())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\ipykernel_launcher.py:3: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
      "  This is separate from the ipykernel package so we can avoid doing imports until\n",
      "D:\\program\\Lib\\site-packages\\ipykernel_launcher.py:4: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
      "  after removing the cwd from sys.path.\n"
     ]
    }
   ],
   "source": [
    "train_tweet_climate = train_tweet[train_tweet.TARGET=='climate change is a real concern']\n",
    "test_tweet_climate = test_tweet[test_tweet.TARGET=='climate change is a real concern']\n",
    "train_tweet_climate['TARGET'] = 'climate change is concern'\n",
    "test_tweet_climate['TARGET'] = 'climate change is concern'\n",
    "# 注意;训练与测试target 有出入\n",
    "train_tweet_abortion = train_tweet[train_tweet.TARGET=='legalization of abortion']\n",
    "test_tweet_abortion = test_tweet[test_tweet.TARGET=='legalization of abortion']\n",
    "\n",
    "train_tweet_feminist = train_tweet[train_tweet.TARGET=='feminist movement']\n",
    "test_tweet_feminist = test_tweet[test_tweet.TARGET=='feminist movement']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Per-topic subsets for the 'hillary clinton' target.\n",
     "train_tweet_hillary = train_tweet[train_tweet.TARGET=='hillary clinton']\n",
     "test_tweet_hillary = test_tweet[test_tweet.TARGET=='hillary clinton']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=====数据分布====\n",
      "climate change is concern\n",
      "Counter({'FAVOR': 212, 'NONE': 168, 'AGAINST': 15})\n",
      "Counter({'FAVOR': 123, 'NONE': 35, 'AGAINST': 11})\n",
      "legaliztion of abortion\n",
      "Counter({'AGAINST': 355, 'NONE': 177, 'FAVOR': 121})\n",
      "Counter({'AGAINST': 189, 'FAVOR': 46, 'NONE': 45})\n"
     ]
    }
   ],
   "source": [
    "print('=====数据分布====')\n",
    "print('climate change is concern')\n",
    "print(Counter(train_tweet_climate.Stance.tolist()))\n",
    "print(Counter(test_tweet_climate.Stance.tolist()))\n",
    "print('legaliztion of abortion')\n",
    "print(Counter(train_tweet_abortion.Stance.tolist()))\n",
    "print(Counter(test_tweet_abortion.Stance.tolist()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## process_weibo\n",
    "#### Overview of NLPCC Shared Task 4: Stance Detection in Chinese Microblogs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def pre_process_weibo(sent):\n",
     "    \"\"\"Clean one weibo post and return it as a space-joined jieba token string.\"\"\"\n",
     "    sent = sent.lower()\n",
     "    # Replace URLs with a placeholder token.\n",
     "    sent = re.sub(r'www[/|:|0-9|a-z|A-Z|.]+',' URL ',sent)\n",
     "    sent = re.sub(r'https?[/|:|0-9|a-z|A-Z|.]+',' URL ',sent)\n",
     "    # Replace @mentions with a placeholder token.\n",
     "    sent = re.sub(r'@[\\S]+', ' USERMENTION ', sent)\n",
     "    # Keep hashtag text, drop the leading '#'.\n",
     "    sent = re.sub(r'#(\\S+)', r'\\1', sent)\n",
     "    # Strip quote marks, slashes, brackets, dashes and stray '#'.\n",
     "    # NOTE(review): the '|' characters inside these character classes are\n",
     "    # literal pipes, not alternation; harmless here but easy to misread.\n",
     "    sent = re.sub(r'[\\\"|\\“|\\”|\\/|／|(|)|（|）|—|#]+',r'',sent)\n",
     "    # Split model suffixes off 'iphone' (e.g. 'iphone5se' -> 'iphone 5se').\n",
     "    sent = re.sub(r'iphone([a-z|0-9]+)',r'iphone \\1',sent)\n",
     "    return ' '.join(jieba.lcut(sent))\n",
     "# Load NLPCC-2016 Task 4 weibo stance data; the gold file is GBK-encoded.\n",
     "train_weibo = pd.read_table('data/nlpcc/train.txt')\n",
     "test_weibo = pd.read_table('data/nlpcc/gold.txt',encoding='gbk')\n",
     "# Map stance strings to integer class ids; NaN is treated as NONE.\n",
     "label_dict_weibo =  {nan:0,'NONE':0,'FAVOR':1,'AGAINST':2}\n",
     "train_weibo['WORDS'] = train_weibo['TEXT'].apply(lambda x:pre_process_weibo(x))\n",
     "test_weibo['WORDS'] = test_weibo['TEXT'].apply(lambda x:pre_process_weibo(x))\n",
     "train_weibo['LABEL'] = train_weibo['STANCE'].apply(lambda x:label_dict_weibo[x])\n",
     "test_weibo['LABEL'] = test_weibo['STANCE'].apply(lambda x:label_dict_weibo[x])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style>\n",
       "    .dataframe thead tr:only-child th {\n",
       "        text-align: right;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>ID</th>\n",
       "      <th>TARGET</th>\n",
       "      <th>TEXT</th>\n",
       "      <th>STANCE</th>\n",
       "      <th>WORDS</th>\n",
       "      <th>LABEL</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>3月31日，苹果iPhone SE正式开卖，然而这款小屏新机并未出现人们预想的疯抢局面。根据...</td>\n",
       "      <td>NONE</td>\n",
       "      <td>3 月 31 日 ， 苹果 iphone   se 正式 开卖 ， 然而 这款 小屏 新机 ...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>2</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>当时看到中国预定340万挺不相信，毕竟 6s ／ 6s Plus 全球首周末1300万。今天...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>当时 看到 中国 预定 340 万挺 不 相信 ， 毕竟   6s     6s   plu...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>3</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>Phone SE，价格感人，但是看这外形，又可以拿着5s出去装逼了。</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>phone   se ， 价格 感人 ， 但是 看 这 外形 ， 又 可以 拿 着 5s 出...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>4</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE官网已经要2-3周发货了，也真是一件神奇的事情……</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>iphone   se 官网 已经 要 2 - 3 周 发货 了 ， 也 真是 一件 神奇 ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE良心价的原因之一：内存只1GB？：如果真的这样，不知道会浇灭多少人马上掏钱...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>iphone   se 良心 价 的 原因 之一 ： 内存 只 1gb ？ ： 如果 真的 ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>6</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>睡了。失望至极，iphone se和5外观几乎没有区别。只是把6s的配置装到了原本的ipho...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>睡 了 。 失望 至极 ， iphone   se 和 5 外观 几乎 没有 区别 。 只是...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>7</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>日前是国行iPhone SE正式发售第一天，很多预定用户拿到了iPhone SE新机。不过，...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>日前 是 国行 iphone   se 正式 发售 第一天 ， 很多 预定 用户 拿到 了 ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>8</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE的外观太让人失望了，也许价格会成吸引点吧</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>iphone   se 的 外观 太 让 人 失望 了 ， 也许 价格 会成 吸引 点 吧</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>9</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>已经等不及想要去看看这款手机了</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>已经 等不及 想要 去 看看 这 款 手机 了</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>10</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>黄屏门似乎一直没有回应？</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>黄 屏门 似乎 一直 没有 回应 ？</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>11</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>听说黄屏，还好我没入坑。你们就当护眼模式吧</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>听说 黄屏 ， 还好 我 没入 坑 。 你们 就 当 护眼 模式 吧</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>12</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>很想尝试下iPhonese……</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>很想 尝试 下 iphonese … …</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>13</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>终于等到小屏苹果了，一直想换手机，6S太大，5S想买个用，但是发现这个了以后就一直等着。</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>终于 等到 小屏 苹果 了 ， 一直 想换 手机 ， 6s 太大 ， 5s 想买个 用 ， ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>14</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>最近几个月一直认为2016年二季度将是智能手机产业的一个中期的景气拐点。苹果应该是看到情形不...</td>\n",
       "      <td>NONE</td>\n",
       "      <td>最 近几个月 一直 认为 2016 年 二季度 将 是 智能手机 产业 的 一个 中期 的 ...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>15</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone se 这价格诚意十足啊，准备入手</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphone   se   这 价格 诚意 十足 啊 ， 准备 入手</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>16</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>我的评论来自SE，好轻，</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>我 的 评论 来自 se ， 好轻 ，</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>17</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>值得拥有！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>值得 拥有 ！</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>18</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>很多人挺搞笑的，说苹果就必须要越造越大才有人买，那你怎么不干脆去买个电脑 买iphone6 ...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>很多 人 挺 搞笑 的 ， 说 苹果 就 必须 要 越造 越大才 有人 买 ， 那 你 怎么...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>19</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>我的今天刚到，周围是有点黄屏，不过不碍事哈哈哈</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>我 的 今天 刚到 ， 周围 是 有点 黄屏 ， 不过 不 碍事 哈哈哈</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>20</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>国人对于手机的用途还没达到必须用大屏的时代，小屏的优势还是很明显的，希望手机厂商都能理性回归吧。</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>国人 对于 手机 的 用途 还 没 达到 必须 用大屏 的 时代 ， 小屏 的 优势 还是 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20</th>\n",
       "      <td>21</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>说实话，很讨厌苹果的操作方式，很不方便。安卓一键回主屏，何其方便！苹果的屏幕已经够小了，还弄...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>说实话 ， 很 讨厌 苹果 的 操作 方式 ， 很 不 方便 。 安卓 一键 回主屏 ， 何...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21</th>\n",
       "      <td>22</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>系统没得说 苹果做的就是系统 开了很多后台依然流畅 再好的安卓时间长了照样卡成屎</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>系统 没得说   苹果 做 的 就是 系统   开 了 很多 后台 依然 流畅   再 好 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22</th>\n",
       "      <td>23</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>刚开封一天的全新iphoneSE 4300买的 64g 3500到4000卖出 想要的快来找我</td>\n",
       "      <td>NONE</td>\n",
       "      <td>刚 开封 一天 的 全新 iphonese   4300 买 的   64g   3500 ...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23</th>\n",
       "      <td>24</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE发布第天，中国网民开始对这款4英寸设备进行集体吐槽，大量的微博评论直指iP...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>iphone   se 发布 第天 ， 中国网民 开始 对 这款 4 英寸 设备 进行 集体...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24</th>\n",
       "      <td>25</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iphone SE 这配置…这价格…真的吧国产机秒的渣都不剩？还有什么理由不用它？因为没有情怀么？</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphone   se   这 配置 … 这 价格 … 真的 吧 国产机 秒 的 渣 都 不...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25</th>\n",
       "      <td>26</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>我的看法，iPhone6造型确实比不上前代作品，现在拿4来看依然时尚好看，双面玻璃反光时尚，...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>我 的 看法 ， iphone6 造型 确实 比不上 前 代 作品 ， 现在 拿 4 来看 ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26</th>\n",
       "      <td>27</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>很好的手机，性价比高，比国产好的没话说，国产一年一环 苹果用5年8年没事不卡不迟钝，黑粉们哪...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>很 好 的 手机 ， 性价比 高 ， 比 国产 好 的 没话说 ， 国产 一年 一环   苹...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27</th>\n",
       "      <td>28</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>可以换的！我一直都是官网买的，重来没出过问题机！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>可以 换 的 ！ 我 一直 都 是 官网 买 的 ， 重来 没 出过 问题 机 ！</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28</th>\n",
       "      <td>29</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>手机中的小钢炮 大屏幕手机可能很多人喜欢 但是无意也会有喜欢小屏幕 高性能的手机 我觉得这是...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>手机 中 的 小钢炮   大屏幕 手机 可能 很多 人 喜欢   但是 无意 也 会 有 喜...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29</th>\n",
       "      <td>30</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>我相信随着iphone5se的发布 iphone5s的销量也会迎来第二春 iphone作为手...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>我 相信 随着 iphone5se 的 发布   iphone5s 的 销量 也 会 迎来 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>570</th>\n",
       "      <td>571</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>回答我 iphone5se值不值得买？</td>\n",
       "      <td>NONE</td>\n",
       "      <td>回答 我   iphone5se 值不值得 买 ？</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>571</th>\n",
       "      <td>572</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>作为售价相对低廉的一款 iPhone，并且具备与 iPhone 6S 几乎相同的性能，这点务...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>作为 售价 相对 低廉 的 一款   iphone ， 并且 具备 与   iphone  ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>572</th>\n",
       "      <td>573</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>看上去，比较适合女性朋友们使用的吧</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>看上去 ， 比较 适合 女性朋友 们 使用 的 吧</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>573</th>\n",
       "      <td>574</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE已有货！小6s，作为苹果史上性价比最高的手机，值得入手。</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphone   se 已有 货 ！ 小 6s ， 作为 苹果 史上 性价比 最高 的 手机...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>574</th>\n",
       "      <td>575</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>别的不知道我觉得iPhone6+在阳光下拍照发黄发焦简直是让人受不了！ 不过非阳光下还是灰常...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>别的 不 知道 我 觉得 iphone6 + 在 阳光 下 拍照 发黄 发焦 简直 是 让 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>575</th>\n",
       "      <td>576</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>吸引了！我由iPhone6P转SE，单手触控感觉良好！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>吸引 了 ！ 我 由 iphone6p 转 se ， 单手 触控 感觉良好 ！</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>576</th>\n",
       "      <td>577</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>都快出7了………有点脑子的人都不会去买这个仿5s的东西！5s是经典之作，并不是单纯提升所谓的...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>都 快 出 7 了 … … … 有点 脑子 的 人 都 不会 去 买 这个 仿 5s 的 东...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>577</th>\n",
       "      <td>578</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>换了深空灰的iPhone se之后一点都不想买手机壳了，就是喜欢他背面的颜色，反正我是不用手...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>换 了 深空 灰 的 iphone   se 之后 一点 都 不想 买手机 壳 了 ， 就是...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>578</th>\n",
       "      <td>579</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>我觉得续航肯定大悲剧，想象一下IP6续航的一半多么可怕</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>我 觉得 续航 肯定 大 悲剧 ， 想象 一下 ip6 续航 的 一半 多么 可怕</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>579</th>\n",
       "      <td>580</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>Phone5se的传闻一来一堆媒体表达“小屏重回主流”——我不明白“主流”是哪里来的，或许真...</td>\n",
       "      <td>NONE</td>\n",
       "      <td>phone5se 的 传闻 一来 一堆 媒体 表达 小屏 重回 主流 我 不 明白 主流 是...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>580</th>\n",
       "      <td>581</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhoneSE 16GB💲399出来后。。。iPhone6还有6plus要卖不出去了！！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphonese   16gb 💲 399 出来 后 。 。 。 iphone6 还有 6p...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>581</th>\n",
       "      <td>582</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>虽然我是忠实的苹果粉丝用的笔记本、手机和iPad都是苹果但是对于这款iPhoneSE我确一点...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>虽然 我 是 忠实 的 苹果 粉丝 用 的 笔记本 、 手机 和 ipad 都 是 苹果 但...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>582</th>\n",
       "      <td>583</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>讲道理，iphone se这个配置这个价格，我心动了</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>讲道理 ， iphone   se 这个 配置 这个 价格 ， 我 心动 了</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>583</th>\n",
       "      <td>584</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>真机好Q啊啊啊啊啊</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>真机 好 q 啊啊啊 啊 啊</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>584</th>\n",
       "      <td>585</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE还不错啊！配置、价格都是亮点！如果不是和5s长得一样，我都想换掉5s了！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphone   se 还 不错 啊 ！ 配置 、 价格 都 是 亮点 ！ 如果 不是 和 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>585</th>\n",
       "      <td>586</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>互联网生态时代，要让用户只为价值买单。硬件如此暴利还值得粉吗，你的消费观是什么</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>互联网 生态 时代 ， 要 让 用户 只 为 价值 买单 。 硬件 如此 暴利 还 值得 粉...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>586</th>\n",
       "      <td>587</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhone SE相比6s少了3D Touch、128G内存选择、双域像素、LTE-Adva...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>iphone   se 相比 6s 少 了 3d   touch 、 128g 内存 选择 ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>587</th>\n",
       "      <td>588</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>网上定的IPhone SE终于到了 开心 不黄屏 么么哒</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>网上 定 的 iphone   se 终于 到 了   开心   不 黄屏   么 么 哒</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>588</th>\n",
       "      <td>589</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>iPhoneSE电池续航真不是盖的，重度使用一天无压力，但到20%时得开启低电量模式</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>iphonese 电池 续航 真 不是 盖 的 ， 重度 使用 一天 无 压力 ， 但 到 ...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>589</th>\n",
       "      <td>590</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>事实上，除了尺寸和造型之外，iPhone SE 和 iPhone 6s 还是存在一些细节方面...</td>\n",
       "      <td>NONE</td>\n",
       "      <td>事实上 ， 除了 尺寸 和 造型 之外 ， iphone   se   和   iphone...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>590</th>\n",
       "      <td>591</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>今天卖的iPhoneSE玫瑰金，的确够屌</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>今天 卖 的 iphonese 玫瑰 金 ， 的确 够 屌</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>591</th>\n",
       "      <td>592</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>屏幕占比小（屏也小）、手机像素差、屏幕分辨率低、高不就低不下。买SE性价比超低...不如买华...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>屏幕 占 比小屏 也 小 、 手机 像素 差 、 屏幕 分辨率 低 、 高 不 就 低 不下...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>592</th>\n",
       "      <td>593</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>phoneSE，3288 ！！  看着跟5S外观一个样！但功能跟反应速度大大不一样！</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>phonese ， 3288   ！ ！     看着 跟 5s 外观 一个样 ！ 但 功能...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>593</th>\n",
       "      <td>594</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>发布这样的产品真感觉苹果公司快不行了，想想当年苹果4 4s是nb到什么境界了，从苹果5开始到...</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>发布 这样 的 产品 真 感觉 苹果公司 快 不行 了 ， 想想 当年 苹果 4   4s ...</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>594</th>\n",
       "      <td>595</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>外观好看用着大气</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>外观 好看 用 着 大气</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>595</th>\n",
       "      <td>596</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>IFix 拆机证明了5S和SE屏幕共用</td>\n",
       "      <td>NONE</td>\n",
       "      <td>ifix   拆机 证明 了 5s 和 se 屏幕 共用</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>596</th>\n",
       "      <td>597</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>能不能不要和5s长得一样</td>\n",
       "      <td>AGAINST</td>\n",
       "      <td>能 不能不要 和 5s 长得 一样</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>597</th>\n",
       "      <td>598</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>当然，适合自己的才是最重要的。如果不喜欢SE的大小那么6s才是现在苹果阵营中最适合的那一个。...</td>\n",
       "      <td>NONE</td>\n",
       "      <td>当然 ， 适合 自己 的 才 是 最 重要 的 。 如果 不 喜欢 se 的 大小 那么 6...</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>598</th>\n",
       "      <td>599</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>苹果iPhoneSE虽然看起来可能会比iPhone6s低一个档次，但是在某些配置上并不输iP...</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>苹果 iphonese 虽然 看起来 可能 会 比 iphone6s 低 一个 档次 ， 但...</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>599</th>\n",
       "      <td>600</td>\n",
       "      <td>iphone se</td>\n",
       "      <td>去店里看了看，跟我的4s 对比屏幕根本不算黄啊，，，</td>\n",
       "      <td>FAVOR</td>\n",
       "      <td>去 店里 看 了 看 ， 跟 我 的 4s   对比 屏幕 根本 不算 黄 啊 ， ， ，</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>600 rows × 6 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "      ID     TARGET                                               TEXT  \\\n",
       "0      1  iphone se  3月31日，苹果iPhone SE正式开卖，然而这款小屏新机并未出现人们预想的疯抢局面。根据...   \n",
       "1      2  iphone se  当时看到中国预定340万挺不相信，毕竟 6s ／ 6s Plus 全球首周末1300万。今天...   \n",
       "2      3  iphone se                 Phone SE，价格感人，但是看这外形，又可以拿着5s出去装逼了。   \n",
       "3      4  iphone se                 iPhone SE官网已经要2-3周发货了，也真是一件神奇的事情……   \n",
       "4      5  iphone se  iPhone SE良心价的原因之一：内存只1GB？：如果真的这样，不知道会浇灭多少人马上掏钱...   \n",
       "5      6  iphone se  睡了。失望至极，iphone se和5外观几乎没有区别。只是把6s的配置装到了原本的ipho...   \n",
       "6      7  iphone se  日前是国行iPhone SE正式发售第一天，很多预定用户拿到了iPhone SE新机。不过，...   \n",
       "7      8  iphone se                      iPhone SE的外观太让人失望了，也许价格会成吸引点吧   \n",
       "8      9  iphone se                                    已经等不及想要去看看这款手机了   \n",
       "9     10  iphone se                                       黄屏门似乎一直没有回应？   \n",
       "10    11  iphone se                             听说黄屏，还好我没入坑。你们就当护眼模式吧    \n",
       "11    12  iphone se                                    很想尝试下iPhonese……   \n",
       "12    13  iphone se       终于等到小屏苹果了，一直想换手机，6S太大，5S想买个用，但是发现这个了以后就一直等着。   \n",
       "13    14  iphone se  最近几个月一直认为2016年二季度将是智能手机产业的一个中期的景气拐点。苹果应该是看到情形不...   \n",
       "14    15  iphone se                            iPhone se 这价格诚意十足啊，准备入手   \n",
       "15    16  iphone se                                       我的评论来自SE，好轻，   \n",
       "16    17  iphone se                                              值得拥有！   \n",
       "17    18  iphone se  很多人挺搞笑的，说苹果就必须要越造越大才有人买，那你怎么不干脆去买个电脑 买iphone6 ...   \n",
       "18    19  iphone se                            我的今天刚到，周围是有点黄屏，不过不碍事哈哈哈   \n",
       "19    20  iphone se   国人对于手机的用途还没达到必须用大屏的时代，小屏的优势还是很明显的，希望手机厂商都能理性回归吧。   \n",
       "20    21  iphone se  说实话，很讨厌苹果的操作方式，很不方便。安卓一键回主屏，何其方便！苹果的屏幕已经够小了，还弄...   \n",
       "21    22  iphone se           系统没得说 苹果做的就是系统 开了很多后台依然流畅 再好的安卓时间长了照样卡成屎   \n",
       "22    23  iphone se    刚开封一天的全新iphoneSE 4300买的 64g 3500到4000卖出 想要的快来找我   \n",
       "23    24  iphone se  iPhone SE发布第天，中国网民开始对这款4英寸设备进行集体吐槽，大量的微博评论直指iP...   \n",
       "24    25  iphone se  iphone SE 这配置…这价格…真的吧国产机秒的渣都不剩？还有什么理由不用它？因为没有情怀么？   \n",
       "25    26  iphone se  我的看法，iPhone6造型确实比不上前代作品，现在拿4来看依然时尚好看，双面玻璃反光时尚，...   \n",
       "26    27  iphone se  很好的手机，性价比高，比国产好的没话说，国产一年一环 苹果用5年8年没事不卡不迟钝，黑粉们哪...   \n",
       "27    28  iphone se                           可以换的！我一直都是官网买的，重来没出过问题机！   \n",
       "28    29  iphone se  手机中的小钢炮 大屏幕手机可能很多人喜欢 但是无意也会有喜欢小屏幕 高性能的手机 我觉得这是...   \n",
       "29    30  iphone se  我相信随着iphone5se的发布 iphone5s的销量也会迎来第二春 iphone作为手...   \n",
       "..   ...        ...                                                ...   \n",
       "570  571  iphone se                                回答我 iphone5se值不值得买？   \n",
       "571  572  iphone se  作为售价相对低廉的一款 iPhone，并且具备与 iPhone 6S 几乎相同的性能，这点务...   \n",
       "572  573  iphone se                                  看上去，比较适合女性朋友们使用的吧   \n",
       "573  574  iphone se              iPhone SE已有货！小6s，作为苹果史上性价比最高的手机，值得入手。   \n",
       "574  575  iphone se  别的不知道我觉得iPhone6+在阳光下拍照发黄发焦简直是让人受不了！ 不过非阳光下还是灰常...   \n",
       "575  576  iphone se                        吸引了！我由iPhone6P转SE，单手触控感觉良好！   \n",
       "576  577  iphone se  都快出7了………有点脑子的人都不会去买这个仿5s的东西！5s是经典之作，并不是单纯提升所谓的...   \n",
       "577  578  iphone se  换了深空灰的iPhone se之后一点都不想买手机壳了，就是喜欢他背面的颜色，反正我是不用手...   \n",
       "578  579  iphone se                        我觉得续航肯定大悲剧，想象一下IP6续航的一半多么可怕   \n",
       "579  580  iphone se  Phone5se的传闻一来一堆媒体表达“小屏重回主流”——我不明白“主流”是哪里来的，或许真...   \n",
       "580  581  iphone se      iPhoneSE 16GB💲399出来后。。。iPhone6还有6plus要卖不出去了！！   \n",
       "581  582  iphone se  虽然我是忠实的苹果粉丝用的笔记本、手机和iPad都是苹果但是对于这款iPhoneSE我确一点...   \n",
       "582  583  iphone se                         讲道理，iphone se这个配置这个价格，我心动了   \n",
       "583  584  iphone se                                          真机好Q啊啊啊啊啊   \n",
       "584  585  iphone se      iPhone SE还不错啊！配置、价格都是亮点！如果不是和5s长得一样，我都想换掉5s了！   \n",
       "585  586  iphone se            互联网生态时代，要让用户只为价值买单。硬件如此暴利还值得粉吗，你的消费观是什么   \n",
       "586  587  iphone se  iPhone SE相比6s少了3D Touch、128G内存选择、双域像素、LTE-Adva...   \n",
       "587  588  iphone se                       网上定的IPhone SE终于到了 开心 不黄屏 么么哒   \n",
       "588  589  iphone se         iPhoneSE电池续航真不是盖的，重度使用一天无压力，但到20%时得开启低电量模式   \n",
       "589  590  iphone se  事实上，除了尺寸和造型之外，iPhone SE 和 iPhone 6s 还是存在一些细节方面...   \n",
       "590  591  iphone se                               今天卖的iPhoneSE玫瑰金，的确够屌   \n",
       "591  592  iphone se  屏幕占比小（屏也小）、手机像素差、屏幕分辨率低、高不就低不下。买SE性价比超低...不如买华...   \n",
       "592  593  iphone se         phoneSE，3288 ！！  看着跟5S外观一个样！但功能跟反应速度大大不一样！   \n",
       "593  594  iphone se  发布这样的产品真感觉苹果公司快不行了，想想当年苹果4 4s是nb到什么境界了，从苹果5开始到...   \n",
       "594  595  iphone se                                           外观好看用着大气   \n",
       "595  596  iphone se                                IFix 拆机证明了5S和SE屏幕共用   \n",
       "596  597  iphone se                                       能不能不要和5s长得一样   \n",
       "597  598  iphone se  当然，适合自己的才是最重要的。如果不喜欢SE的大小那么6s才是现在苹果阵营中最适合的那一个。...   \n",
       "598  599  iphone se  苹果iPhoneSE虽然看起来可能会比iPhone6s低一个档次，但是在某些配置上并不输iP...   \n",
       "599  600  iphone se                         去店里看了看，跟我的4s 对比屏幕根本不算黄啊，，，   \n",
       "\n",
       "      STANCE                                              WORDS  LABEL  \n",
       "0       NONE  3 月 31 日 ， 苹果 iphone   se 正式 开卖 ， 然而 这款 小屏 新机 ...      0  \n",
       "1      FAVOR  当时 看到 中国 预定 340 万挺 不 相信 ， 毕竟   6s     6s   plu...      1  \n",
       "2    AGAINST  phone   se ， 价格 感人 ， 但是 看 这 外形 ， 又 可以 拿 着 5s 出...      2  \n",
       "3    AGAINST  iphone   se 官网 已经 要 2 - 3 周 发货 了 ， 也 真是 一件 神奇 ...      2  \n",
       "4    AGAINST  iphone   se 良心 价 的 原因 之一 ： 内存 只 1gb ？ ： 如果 真的 ...      2  \n",
       "5    AGAINST  睡 了 。 失望 至极 ， iphone   se 和 5 外观 几乎 没有 区别 。 只是...      2  \n",
       "6    AGAINST  日前 是 国行 iphone   se 正式 发售 第一天 ， 很多 预定 用户 拿到 了 ...      2  \n",
       "7    AGAINST      iphone   se 的 外观 太 让 人 失望 了 ， 也许 价格 会成 吸引 点 吧      2  \n",
       "8      FAVOR                            已经 等不及 想要 去 看看 这 款 手机 了      1  \n",
       "9    AGAINST                                 黄 屏门 似乎 一直 没有 回应 ？      2  \n",
       "10   AGAINST               听说 黄屏 ， 还好 我 没入 坑 。 你们 就 当 护眼 模式 吧        2  \n",
       "11     FAVOR                               很想 尝试 下 iphonese … …      1  \n",
       "12     FAVOR  终于 等到 小屏 苹果 了 ， 一直 想换 手机 ， 6s 太大 ， 5s 想买个 用 ， ...      1  \n",
       "13      NONE  最 近几个月 一直 认为 2016 年 二季度 将 是 智能手机 产业 的 一个 中期 的 ...      0  \n",
       "14     FAVOR                 iphone   se   这 价格 诚意 十足 啊 ， 准备 入手      1  \n",
       "15     FAVOR                                我 的 评论 来自 se ， 好轻 ，      1  \n",
       "16     FAVOR                                            值得 拥有 ！      1  \n",
       "17     FAVOR  很多 人 挺 搞笑 的 ， 说 苹果 就 必须 要 越造 越大才 有人 买 ， 那 你 怎么...      1  \n",
       "18     FAVOR               我 的 今天 刚到 ， 周围 是 有点 黄屏 ， 不过 不 碍事 哈哈哈      1  \n",
       "19     FAVOR  国人 对于 手机 的 用途 还 没 达到 必须 用大屏 的 时代 ， 小屏 的 优势 还是 ...      1  \n",
       "20   AGAINST  说实话 ， 很 讨厌 苹果 的 操作 方式 ， 很 不 方便 。 安卓 一键 回主屏 ， 何...      2  \n",
       "21     FAVOR  系统 没得说   苹果 做 的 就是 系统   开 了 很多 后台 依然 流畅   再 好 ...      1  \n",
       "22      NONE  刚 开封 一天 的 全新 iphonese   4300 买 的   64g   3500 ...      0  \n",
       "23   AGAINST  iphone   se 发布 第天 ， 中国网民 开始 对 这款 4 英寸 设备 进行 集体...      2  \n",
       "24     FAVOR  iphone   se   这 配置 … 这 价格 … 真的 吧 国产机 秒 的 渣 都 不...      1  \n",
       "25   AGAINST  我 的 看法 ， iphone6 造型 确实 比不上 前 代 作品 ， 现在 拿 4 来看 ...      2  \n",
       "26     FAVOR  很 好 的 手机 ， 性价比 高 ， 比 国产 好 的 没话说 ， 国产 一年 一环   苹...      1  \n",
       "27     FAVOR          可以 换 的 ！ 我 一直 都 是 官网 买 的 ， 重来 没 出过 问题 机 ！      1  \n",
       "28     FAVOR  手机 中 的 小钢炮   大屏幕 手机 可能 很多 人 喜欢   但是 无意 也 会 有 喜...      1  \n",
       "29     FAVOR  我 相信 随着 iphone5se 的 发布   iphone5s 的 销量 也 会 迎来 ...      1  \n",
       "..       ...                                                ...    ...  \n",
       "570     NONE                          回答 我   iphone5se 值不值得 买 ？      0  \n",
       "571  AGAINST  作为 售价 相对 低廉 的 一款   iphone ， 并且 具备 与   iphone  ...      2  \n",
       "572    FAVOR                          看上去 ， 比较 适合 女性朋友 们 使用 的 吧      1  \n",
       "573    FAVOR  iphone   se 已有 货 ！ 小 6s ， 作为 苹果 史上 性价比 最高 的 手机...      1  \n",
       "574    FAVOR  别的 不 知道 我 觉得 iphone6 + 在 阳光 下 拍照 发黄 发焦 简直 是 让 ...      1  \n",
       "575    FAVOR            吸引 了 ！ 我 由 iphone6p 转 se ， 单手 触控 感觉良好 ！      1  \n",
       "576  AGAINST  都 快 出 7 了 … … … 有点 脑子 的 人 都 不会 去 买 这个 仿 5s 的 东...      2  \n",
       "577    FAVOR  换 了 深空 灰 的 iphone   se 之后 一点 都 不想 买手机 壳 了 ， 就是...      1  \n",
       "578  AGAINST          我 觉得 续航 肯定 大 悲剧 ， 想象 一下 ip6 续航 的 一半 多么 可怕      2  \n",
       "579     NONE  phone5se 的 传闻 一来 一堆 媒体 表达 小屏 重回 主流 我 不 明白 主流 是...      0  \n",
       "580    FAVOR  iphonese   16gb 💲 399 出来 后 。 。 。 iphone6 还有 6p...      1  \n",
       "581  AGAINST  虽然 我 是 忠实 的 苹果 粉丝 用 的 笔记本 、 手机 和 ipad 都 是 苹果 但...      2  \n",
       "582    FAVOR             讲道理 ， iphone   se 这个 配置 这个 价格 ， 我 心动 了      1  \n",
       "583    FAVOR                                     真机 好 q 啊啊啊 啊 啊      1  \n",
       "584    FAVOR  iphone   se 还 不错 啊 ！ 配置 、 价格 都 是 亮点 ！ 如果 不是 和 ...      1  \n",
       "585  AGAINST  互联网 生态 时代 ， 要 让 用户 只 为 价值 买单 。 硬件 如此 暴利 还 值得 粉...      2  \n",
       "586  AGAINST  iphone   se 相比 6s 少 了 3d   touch 、 128g 内存 选择 ...      2  \n",
       "587    FAVOR      网上 定 的 iphone   se 终于 到 了   开心   不 黄屏   么 么 哒      1  \n",
       "588    FAVOR  iphonese 电池 续航 真 不是 盖 的 ， 重度 使用 一天 无 压力 ， 但 到 ...      1  \n",
       "589     NONE  事实上 ， 除了 尺寸 和 造型 之外 ， iphone   se   和   iphone...      0  \n",
       "590    FAVOR                      今天 卖 的 iphonese 玫瑰 金 ， 的确 够 屌      1  \n",
       "591  AGAINST  屏幕 占 比小屏 也 小 、 手机 像素 差 、 屏幕 分辨率 低 、 高 不 就 低 不下...      2  \n",
       "592    FAVOR  phonese ， 3288   ！ ！     看着 跟 5s 外观 一个样 ！ 但 功能...      1  \n",
       "593  AGAINST  发布 这样 的 产品 真 感觉 苹果公司 快 不行 了 ， 想想 当年 苹果 4   4s ...      2  \n",
       "594    FAVOR                                       外观 好看 用 着 大气      1  \n",
       "595     NONE                       ifix   拆机 证明 了 5s 和 se 屏幕 共用      0  \n",
       "596  AGAINST                                  能 不能不要 和 5s 长得 一样      2  \n",
       "597     NONE  当然 ， 适合 自己 的 才 是 最 重要 的 。 如果 不 喜欢 se 的 大小 那么 6...      0  \n",
       "598    FAVOR  苹果 iphonese 虽然 看起来 可能 会 比 iphone6s 低 一个 档次 ， 但...      1  \n",
       "599    FAVOR      去 店里 看 了 看 ， 跟 我 的 4s   对比 屏幕 根本 不算 黄 啊 ， ， ，      1  \n",
       "\n",
       "[600 rows x 6 columns]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_weibo_phone"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=====数据分布====\n",
      "开放二胎\n",
      "Counter({'FAVOR': 260, 'AGAINST': 200, 'NONE': 140})\n",
      "Counter({'FAVOR': 99, 'AGAINST': 95, 'NONE': 6})\n",
      "俄罗斯叙利亚反恐行动\n",
      "Counter({'AGAINST': 250, 'FAVOR': 250, 'NONE': 100})\n",
      "Counter({'FAVOR': 94, 'AGAINST': 86, 'NONE': 20})\n",
      "深圳禁摩限电\n",
      "Counter({'AGAINST': 300, 'FAVOR': 160, 'NONE': 126, nan: 14})\n",
      "Counter({'AGAINST': 110, 'FAVOR': 63, 'NONE': 27})\n",
      "春节放鞭炮\n",
      "Counter({'FAVOR': 250, 'AGAINST': 250, 'NONE': 100})\n",
      "Counter({'AGAINST': 94, 'FAVOR': 88, 'NONE': 18})\n",
      "iphone se\n",
      "Counter({'FAVOR': 260, 'AGAINST': 200, 'NONE': 140})\n",
      "Counter({'FAVOR': 99, 'AGAINST': 95, 'NONE': 6})\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\pandas\\core\\generic.py:3110: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n",
      "  self[name] = value\n"
     ]
    }
   ],
   "source": [
    "# 注意;训练与测试target 有出入\n",
    "train_weibo_shenzhen = train_weibo[train_weibo.TARGET=='深圳禁摩限电']\n",
    "test_weibo_shenzhen = test_weibo[test_weibo.TARGET=='深圳禁摩限电']\n",
    "train_weibo_russia = train_weibo[train_weibo.TARGET=='俄罗斯在叙利亚的反恐行动']\n",
    "test_weibo_russia = test_weibo[test_weibo.TARGET=='俄罗斯叙利亚反恐行动']\n",
    "train_weibo_chunjie = train_weibo[train_weibo.TARGET=='春节放鞭炮']\n",
    "test_weibo_chunjie = test_weibo[test_weibo.TARGET=='春节放鞭炮']\n",
    "train_weibo_kaifang = train_weibo[train_weibo.TARGET=='开放二胎']\n",
    "test_weibo_kaifang = test_weibo[test_weibo.TARGET=='开放二胎']\n",
    "train_weibo_phone = train_weibo[train_weibo.TARGET=='IphoneSE']\n",
    "test_weibo_phone = test_weibo[test_weibo.TARGET=='iPhone SE']\n",
    "train_weibo_phone.TARGET='iphone se'\n",
    "test_weibo_phone.TARGET='iphone se'\n",
    "\n",
    "\n",
    "print('=====数据分布====')\n",
    "print('开放二胎')\n",
    "print(Counter(train_weibo_kaifang.STANCE.tolist()))\n",
    "print(Counter(test_weibo_kaifang.STANCE.tolist()))\n",
    "print('俄罗斯叙利亚反恐行动')\n",
    "print(Counter(train_weibo_russia.STANCE.tolist()))\n",
    "print(Counter(test_weibo_russia.STANCE.tolist()))\n",
    "print('深圳禁摩限电')\n",
    "print(Counter(train_weibo_shenzhen.STANCE.tolist()))\n",
    "print(Counter(test_weibo_shenzhen.STANCE.tolist()))\n",
    "print('春节放鞭炮')\n",
    "print(Counter(train_weibo_chunjie.STANCE.tolist()))\n",
    "print(Counter(test_weibo_chunjie.STANCE.tolist()))\n",
    "print('iphone se')\n",
    "print(Counter(train_weibo_kaifang.STANCE.tolist()))\n",
    "print(Counter(test_weibo_kaifang.STANCE.tolist()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型搭建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from keras.models import Model\n",
    "from keras.preprocessing.text import Tokenizer\n",
    "from keras.preprocessing.sequence import pad_sequences\n",
    "from keras.utils.np_utils import to_categorical\n",
    "from keras.layers import Dense, Input,LSTM,Embedding,GRU,Bidirectional,TimeDistributed,concatenate,multiply,dot,add\n",
    "from keras.layers import RepeatVector, Flatten,Permute,Reshape,Lambda,Activation,Dropout,GlobalAveragePooling1D\n",
    "from keras.callbacks import EarlyStopping,ModelCheckpoint\n",
    "from keras.optimizers import Adam\n",
    "from keras import backend as K\n",
    "from keras.engine.topology import Layer\n",
    "from keras import activations, regularizers, constraints\n",
    "import tensorflow as tf\n",
    "from keras.callbacks import ModelCheckpoint\n",
    "from keras import initializers\n",
    "from keras.models import load_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\gensim\\utils.py:862: UserWarning: detected Windows; aliasing chunkize to chunkize_serial\n",
      "  warnings.warn(\"detected Windows; aliasing chunkize to chunkize_serial\")\n"
     ]
    }
   ],
    "source": [
     "# Load pre-trained word vectors (300-dimensional GoogleNews embeddings).\n",
     "from gensim.models import Word2Vec\n",
     "# w2v = Word2Vec.load('E:/code/tools/vector/smp/smp.w2v.300d')\n",
     "from gensim.models.keyedvectors import KeyedVectors\n",
     "# NOTE(review): hard-coded absolute Windows path -- consider moving this\n",
     "# into a configurable DATA_DIR so the notebook runs on other machines.\n",
     "w2v = KeyedVectors.load_word2vec_format('E:/code/tools/vector/GoogleNews-vectors-negative300.bin.gz', binary=True)\n",
     "# w2v = KeyedVectors.load_word2vec_format('E:/code/gensim/vector/wiki.zh.vec', binary=False)\n",
     "# weights=np.array([w2v[word] if word in w2v else \n",
     "#                   np.random.uniform(low=-0.01,high=0.01,size=(300,)) \n",
     "#                  for word in vocabs_list])"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
    "source": [
     "# Hyper-parameter settings\n",
     "time_steps = 140 # max sequence length; get_train_data recomputes this per target\n",
     "target_nums = 1  # words in the target phrase; also overwritten by get_train_data\n",
     "batch_size = 50\n",
     "embedding_dims = 300  # must match the dimensionality of the loaded w2v vectors\n",
     "nb_epoch = 50\n",
     "lstm_output_size = 100  # hidden units of the GRU/LSTM layer"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "extra_words = []\n",
    "def get_embedding_matrix(word_index,max_features):\n",
    "    embedding_matrix = np.random.random((max_features, embedding_dims))\n",
    "    for word, i in word_index.items():\n",
    "        if word in w2v:\n",
    "            embedding_matrix[i] = w2v[word]\n",
    "        else:\n",
    "            extra_words.append(word)\n",
    "            embedding_matrix[i] = np.random.uniform(low=-0.01,high=0.01,size=(300,))\n",
    "    return embedding_matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
    "source": [
     "def get_train_data(train_df,test_df):\n",
     "    \"\"\"Tokenize train+test text for one stance target and build model inputs.\n",
     "\n",
     "    Returns (train_data, test_data, time_steps, target_nums, embedding_matrix),\n",
     "    where each *_data is [padded word-id matrix, tiled target-id matrix,\n",
     "    one-hot label matrix].  The tokenizer is fitted on train AND test text\n",
     "    together, so the vocabulary (not the labels) covers both splits.\n",
     "    \"\"\"\n",
     "    print(\"处理：==={}===\".format(test_df.TARGET.tolist()[0]))\n",
     "    texts = train_df.WORDS.tolist()+test_df.WORDS.tolist()\n",
     "    # segment the (lower-cased) target phrase into words\n",
     "    targets = jieba.lcut(test_df.TARGET.tolist()[0].lower())\n",
     "    tokenizer = Tokenizer(num_words=20000)\n",
     "    tokenizer.fit_on_texts(texts)\n",
     "    sequences = tokenizer.texts_to_sequences(texts)\n",
     "    word_index = tokenizer.word_index\n",
     "    # ensure every target word has an id, even if it never occurs in the text\n",
     "    for target in targets:\n",
     "        if target not in word_index.keys():\n",
     "            word_index[target] = len(word_index)+1    \n",
     "    print('Found %s unique tokens.' % len(word_index))\n",
     "    # tokenizer indices start at 1, so reserve one extra row\n",
     "    max_features = len(word_index)+1\n",
     "    # pad to the longest sequence (shadows the global time_steps on purpose)\n",
     "    time_steps = max([len(i) for i in sequences])\n",
     "    data = pad_sequences(sequences, maxlen=time_steps)\n",
     "    labels = to_categorical(np.asarray(train_df.LABEL.tolist()+test_df.LABEL.tolist()))\n",
     "    # one copy of the target word ids per sample\n",
     "    targets_index = [[word_index[i] for i in targets]]*len(texts)\n",
     "    targets_matrix = np.array(targets_index)\n",
     "    # number of words in the target phrase\n",
     "    target_nums = len(targets)\n",
     "    train_len = len(train_df)\n",
     "    embedding_matrix = get_embedding_matrix(word_index,max_features)\n",
     "    train_data = [data[:train_len],targets_matrix[:train_len],labels[:train_len]]\n",
     "    test_data = [data[train_len:],targets_matrix[train_len:],labels[train_len:]]\n",
     "    return train_data,test_data,time_steps,target_nums,embedding_matrix"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "处理：===iphone se===\n",
      "Found 4780 unique tokens.\n"
     ]
    }
   ],
    "source": [
     "# Build the model inputs for one topic at a time; uncomment the pair to use.\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_weibo_russia,test_weibo_russia)\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_weibo_shenzhen,test_weibo_shenzhen)\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_weibo_chunjie,test_weibo_chunjie)\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_weibo_kaifang,test_weibo_kaifang)\n",
     "train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_weibo_phone,test_weibo_phone)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with open('data/phone.pkl','wb')as f:\n",
    "    pickle.dump([train_data,test_data,embedding_matrix],f)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## semeval 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "处理：===legalization of abortion===\n",
      "Found 3290 unique tokens.\n"
     ]
    }
   ],
    "source": [
     "\n",
     "# Prepare one SemEval target at a time; uncomment the desired train/test pair.\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_tweet_feminist,test_tweet_feminist)\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_tweet_climate,test_tweet_climate)\n",
     "# train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_tweet_hillary,test_tweet_hillary)\n",
     "train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(train_tweet_abortion,test_tweet_abortion)\n",
     "# cache the prepared arrays for later runs\n",
     "with open('data/abortion.pkl','wb')as f:\n",
     "    pickle.dump([train_data,test_data,embedding_matrix],f)"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 评价函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import precision_score\n",
    "def f1_score(y_true,y_pred):\n",
    "    pf,pa = precision_score(y_true, y_pred, average=None)[1:]\n",
    "    rf,ra = recall_score(y_true,y_pred,average=None)[1:]\n",
    "    ff = 2*pf*rf/(pf+rf)\n",
    "    fa = 2*pa*ra/(pa+ra)\n",
    "    fa = (ff+fa)/2\n",
    "    return fa\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
    "source": [
     "from keras.models import load_model\n",
     "# model = load_model('tmp/weights.hdf5')\n",
     "# WARNING(review): this zero-argument f1_score SHADOWS the sklearn-based\n",
     "# f1_score(y_true, y_pred) defined earlier in the notebook once this cell\n",
     "# runs; later cells calling f1_score(y_true, y_pred) will then fail.\n",
     "# Consider renaming this helper.\n",
     "def f1_score():\n",
     "    # relies on notebook globals `model`, `data` and `labels` from other cells\n",
     "    pre = [i.argmax() for i in model.predict(data[:200])]\n",
     "    true = [i.argmax() for i in labels[:200]]\n",
     "    # confusion counts keyed by (true_label, predicted_label) pairs\n",
     "    cnts = Counter([(i,j) for i,j in zip(true,pre)])\n",
     "    dict2 = {(0,0):0,(0,1):0,(0,2):0,(1,0):0,(1,1):0,(1,2):0,(2,0):0,(2,1):0,(2,2):0}\n",
     "    for i in cnts:\n",
     "        dict2[i]=cnts[i]\n",
     "    # precision/recall for FAVOR (1) and AGAINST (2); note this raises\n",
     "    # ZeroDivisionError if a confusion-matrix row/column is empty\n",
     "    pf=dict2[(1,1)]/(dict2[(0,1)]+dict2[(1,1)]+dict2[(2,1)])\n",
     "    rf=dict2[(1,1)]/(dict2[(1,0)]+dict2[(1,1)]+dict2[(1,2)])\n",
     "    pa=dict2[(2,2)]/(dict2[(0,2)]+dict2[(1,2)]+dict2[(2,2)])\n",
     "    ra=dict2[(2,2)]/(dict2[(2,0)]+dict2[(2,1)]+dict2[(2,2)])\n",
     "    ff = 2*pf*rf/(pf+rf)\n",
     "    fa = 2*pa*ra/(pa+ra)\n",
     "    fa = (ff+fa)/2\n",
     "    print(fa)\n",
     "def micro_fscore(y_true,y_pred):\n",
     "    # Tensor-level metric built from the argmax'd label ids.\n",
     "    # NOTE(review): the integer labels are clipped to 0/1, so classes 1 and 2\n",
     "    # are merged into a single \"positive\" -- this is NOT a per-class F1;\n",
     "    # confirm this is the intended behavior before relying on it.\n",
     "    true = K.cast(K.argmax(y_true,axis=-1),K.floatx())\n",
     "    pred = K.cast(K.argmax(y_pred,axis=-1),K.floatx())\n",
     "    true_positives = K.sum(K.round(K.clip(true*pred,0,1)))\n",
     "    possible_positives = K.sum(K.round(K.clip(true,0,1)))\n",
     "    predicted_positives = K.sum(K.round(K.clip(pred,0,1)))\n",
     "    recall = true_positives / (possible_positives + K.epsilon())\n",
     "    precision = true_positives/(predicted_positives+K.epsilon())\n",
     "#     score = f1_score(K.get_value(K.argmax(y_true)),K.get_value(K.argmax(y_pred)), average=None)[1]\n",
     "#     var = K.cast_to_floatx(score)\n",
     "    return 2*((precision*recall)/(precision+recall))\n",
     "#     return precision\n",
     "def k_f1_score(y_true,y_pred):\n",
     "    # BROKEN(review): calling K.get_value on symbolic tensors at graph-build\n",
     "    # time hangs/fails (see the traceback in the common_lstm training cell).\n",
     "    # Do not pass this function as a compile-time Keras metric.\n",
     "    sc = K.placeholder(shape=(1))\n",
     "    K.set_value(sc,precision_score(K.get_value(K.argmax(y_true)),K.get_value(K.argmax(y_pred)), average=None)[1])\n",
     "    return sc"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## svm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 136,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:1059: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):\n"
     ]
    }
   ],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn import svm\n",
    "tfidf = TfidfVectorizer(min_df=2,ngram_range=(1,2))\n",
    "x_tfidf = tfidf.fit_transform(train_weibo_russia.WORDS)\n",
    "clf = svm.LinearSVC(C=0.1)\n",
    "training_set_X = x_tfidf\n",
    "training_set_y = train_weibo_russia.LABEL\n",
    "clf.fit(training_set_X, training_set_y)\n",
    "prediction = clf.predict(val)\n",
    "f1_score(test_weibo_russia.LABEL.tolist(),prediction)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## common lstm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def common_lstm():\n",
    "    inputs = Input(shape=(time_steps,),dtype='int32')\n",
    "    x = Embedding(embedding_matrix.shape[0],\n",
    "                  embedding_dims,\n",
    "                  trainable=True,\n",
    "#                   weights=[embedding_matrix],\n",
    "                 input_length=time_steps)(inputs)\n",
    "    x = Dropout(0.2)(x)\n",
    "#     小数据GRU？\n",
    "    lstm_out = GRU(lstm_output_size,dropout=0.2,recurrent_dropout=0.2)(x)\n",
    "    predictions = Dense(3,\n",
    "#                         kernel_regularizer=regularizers.l2(0.01),\n",
    "                       activation='softmax')(lstm_out)\n",
    "    model = Model(inputs=inputs, outputs=predictions)\n",
    "    opt = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n",
    "    model.compile(optimizer=opt,\n",
    "                  loss='categorical_crossentropy',\n",
    "                  metrics=['accuracy',k_f1_score])\n",
    "    return model\n",
    "# gru 0.54,0.68,0.56,0.74,0.67"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 124,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:root:Internal Python error in the inspect module.\n",
      "Below is the traceback from this internal error.\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Traceback (most recent call last):\n",
      "  File \"D:\\program\\Lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 2881, in run_code\n",
      "    exec(code_obj, self.user_global_ns, self.user_ns)\n",
      "  File \"<ipython-input-124-7f7fdacd0c25>\", line 3, in <module>\n",
      "    model = common_lstm()\n",
      "  File \"<ipython-input-123-80b380cb8f68>\", line 18, in common_lstm\n",
      "    metrics=['accuracy',k_f1_score])\n",
      "  File \"D:\\program\\Lib\\site-packages\\keras\\engine\\training.py\", line 914, in compile\n",
      "    handle_metrics(output_metrics)\n",
      "  File \"D:\\program\\Lib\\site-packages\\keras\\engine\\training.py\", line 911, in handle_metrics\n",
      "    mask=masks[i])\n",
      "  File \"D:\\program\\Lib\\site-packages\\keras\\engine\\training.py\", line 426, in weighted\n",
      "    score_array = fn(y_true, y_pred)\n",
      "  File \"<ipython-input-122-ed4f96dea3dd>\", line 3, in k_f1_score\n",
      "    K.set_value(sc,precision_score(K.get_value(K.argmax(y_true)),K.get_value(K.argmax(y_pred)), average=None)[1])\n",
      "  File \"D:\\program\\Lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2307, in get_value\n",
      "    return x.eval(session=get_session())\n",
      "  File \"D:\\program\\Lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 189, in get_session\n",
      "    [tf.is_variable_initialized(v) for v in candidate_vars])\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 895, in run\n",
      "    run_metadata_ptr)\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1124, in _run\n",
      "    feed_dict_tensor, options, run_metadata)\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1321, in _do_run\n",
      "    options, run_metadata)\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1327, in _do_call\n",
      "    return fn(*args)\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1297, in _run_fn\n",
      "    self._extend_graph()\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1353, in _extend_graph\n",
      "    add_shapes=self._add_shapes)\n",
      "  File \"D:\\program\\Lib\\site-packages\\tensorflow\\python\\framework\\ops.py\", line 2437, in _as_graph_def\n",
      "    op = self._nodes_by_id[op_id]\n",
      "KeyboardInterrupt\n",
      "\n",
      "During handling of the above exception, another exception occurred:\n",
      "\n",
      "Traceback (most recent call last):\n",
      "  File \"D:\\program\\Lib\\site-packages\\IPython\\core\\interactiveshell.py\", line 1821, in showtraceback\n",
      "    stb = value._render_traceback_()\n",
      "AttributeError: 'KeyboardInterrupt' object has no attribute '_render_traceback_'\n",
      "\n",
      "During handling of the above exception, another exception occurred:\n",
      "\n",
      "Traceback (most recent call last):\n",
      "  File \"D:\\program\\Lib\\site-packages\\IPython\\core\\ultratb.py\", line 1132, in get_records\n",
      "    return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)\n",
      "  File \"D:\\program\\Lib\\site-packages\\IPython\\core\\ultratb.py\", line 313, in wrapped\n",
      "    return f(*args, **kwargs)\n",
      "  File \"D:\\program\\Lib\\site-packages\\IPython\\core\\ultratb.py\", line 358, in _fixed_getinnerframes\n",
      "    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))\n",
      "  File \"D:\\program\\lib\\inspect.py\", line 1453, in getinnerframes\n",
      "    frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\n",
      "  File \"D:\\program\\lib\\inspect.py\", line 1421, in getframeinfo\n",
      "    index = lineno - 1 - start\n",
      "KeyboardInterrupt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "KeyboardInterrupt\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Train the plain LSTM baseline and report the F-score on the test split.\n",
    "# Fix: `early_stopping` was commented out while still referenced in\n",
    "# `callbacks=[...]` below, which raises NameError on a fresh kernel.\n",
    "early_stopping = EarlyStopping(monitor='val_loss',patience=5)\n",
    "# checkpointer = ModelCheckpoint(filepath='tmp/weights.hdf5',verbose=1,save_best_only=True)\n",
    "model = common_lstm()\n",
    "model.fit(train_data[0], train_data[2],\n",
    "          batch_size=batch_size,\n",
    "          epochs=nb_epoch,\n",
    "          verbose=1,\n",
    "          validation_data=(test_data[0],test_data[2]),\n",
    "          callbacks=[early_stopping],\n",
    "          shuffle=True)\n",
    "y_pred = [i.argmax() for i in model.predict(test_data[0])]\n",
    "y_true = [i.argmax() for i in test_data[2]]\n",
    "print(\"=====f值：{}=========\".format(f1_score(y_true,y_pred)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Stance Classification with Target-Specific Neural Attention Networks\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def softmask(x, mask,axis=1):\n",
    "    '''Softmax with an optional binary mask, used in attention mechanisms.\n",
    "\n",
    "    Masked positions are zeroed before normalisation; the small epsilon\n",
    "    keeps the division safe when an entire row is masked out.\n",
    "    '''\n",
    "    y = K.exp(x)\n",
    "    if mask is not None:\n",
    "        y = y * tf.to_float(mask)\n",
    "    sumx = K.sum(y, axis=axis, keepdims=True) + 1e-6\n",
    "    # Bug fix: this previously returned the unnormalised exponentials `y`\n",
    "    # even though the normalised value had been computed.\n",
    "    return y / sumx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class TanAttention(Layer):\n",
    "    '''Attention layer for the TAN model: projects each time step to a\n",
    "    scalar score and normalises the scores into attention weights.\n",
    "    Input: (batch, time_steps, features); output: (batch, time_steps).\n",
    "    '''\n",
    "    def __init__(self,**kwargs):\n",
    "        # Fix: the Keras attribute is `supports_masking`; the misspelled\n",
    "        # `support_mask` declared nothing to the framework.\n",
    "        self.supports_masking=True\n",
    "        super(TanAttention, self).__init__(**kwargs)\n",
    "        \n",
    "    def build(self,input_shape):\n",
    "        # One scalar score per time step: a (features, 1) kernel plus bias.\n",
    "        self.kernel = self.add_weight(name = 'kernel',\n",
    "                                     shape=(input_shape[2],1),\n",
    "                                     initializer=initializers.RandomUniform(minval=-0.1,maxval=0.1),trainable=True)\n",
    "        self.bias = self.add_weight(name = 'bias',\n",
    "                                     shape=(1,),\n",
    "                                     initializer=initializers.RandomUniform(minval=-0.1,maxval=0.1),trainable=True)\n",
    "        super(TanAttention,self).build(input_shape)\n",
    "    \n",
    "    def call(self, x,mask=None):\n",
    "        # (batch, time, feat) . (feat, 1) -> (batch, time, 1) raw scores.\n",
    "        probs = K.bias_add(K.dot(x,self.kernel),self.bias)\n",
    "        # Drop the trailing singleton axis: (batch, time).\n",
    "        probs2 = K.reshape(probs,(-1,K.int_shape(probs)[1]))\n",
    "        # Masked softmax over the time axis.\n",
    "        atten = softmask(probs2,mask)\n",
    "        return atten\n",
    "    \n",
    "    def compute_output_shape(self, input_shape):\n",
    "        return (input_shape[0],input_shape[1])\n",
    "    \n",
    "    def compute_mask(self, x, mask=None):\n",
    "        # Propagate the incoming mask unchanged.\n",
    "        if mask is not None:\n",
    "            return mask\n",
    "        else:\n",
    "            return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def tan():\n",
    "    '''Build the Target-Specific Neural Attention (TAN) stance model.\n",
    "\n",
    "    Inputs: padded sentence token ids of length `time_steps` and target\n",
    "    token ids of length `target_nums`.\n",
    "    Output: 3-way softmax over stance classes.\n",
    "    '''\n",
    "    sentence_input = Input(shape=(time_steps,),dtype='int32',name='sentence_input')  \n",
    "#     target representation = mean of its word vectors\n",
    "    target_input = Input(shape=(target_nums,),dtype='int32',name='target_input')\n",
    "    embedding_layer = Embedding(embedding_matrix.shape[0],\n",
    "                                embedding_dims,\n",
    "                                weights=[embedding_matrix],\n",
    "                                trainable=True,\n",
    "                                mask_zero=True,\n",
    "                                name='word_embedding')\n",
    "    left_x = embedding_layer(sentence_input)\n",
    "    left_x = Dropout(0.2)(left_x)\n",
    "    target_x = embedding_layer(target_input)\n",
    "    target_x = Lambda(lambda x: K.mean(x, axis=1))(target_x)\n",
    "    # Tile the mean target vector across every time step, then append it\n",
    "    # to each word embedding (target-augmented embedding).\n",
    "    target_matrix = RepeatVector(time_steps)(target_x)  # (time_steps, embedding_dims)\n",
    "    right_x = concatenate([left_x,target_matrix])   # (time_steps, 2*embedding_dims)\n",
    "#     attention_probs = Dense(1)(right_x)\n",
    "#     transpose\n",
    "#     attention_probs = Permute((2,1))(attention_probs)\n",
    "#     left_x = Bidirectional(LSTM(150,dropout=0.2,recurrent_dropout=0.2,return_sequences=True))(left_x)  #140*300\n",
    "    left_x = Bidirectional(LSTM(100,dropout=0.2,recurrent_dropout=0.2,return_sequences=True))(left_x)  # (time_steps, 200)\n",
    "    # Attention weights come from the target-augmented embeddings ...\n",
    "    att_c = TanAttention(name='attention')(right_x)\n",
    "#     attention_mul = multiply([left_x,attention_probs],name='attention_mul')\n",
    "#     attention_mul = Lambda(lambda x: K.sum(x, axis=1), output_shape=(1,200),name='dim_reduction')(attention_mul)  # sentence vector (tensorflow)\n",
    "#     attention_mul = Reshape((200,))(attention_mul)\n",
    "#     attention_mul = Activation('tanh')(attention_mul)\n",
    "    # ... and pool the BiLSTM hidden states into one sentence vector.\n",
    "    attention_mul = dot([att_c,left_x],axes=1,name='attention_mul')\n",
    "#     attention_mul = Reshape((200,))(attention_mul)\n",
    "    attention_mul = Dropout(0.2)(attention_mul)\n",
    "    predictions = Dense(3,\n",
    "#                         kernel_regularizer=regularizers.l2(0.01),\n",
    "                        activation='softmax')(attention_mul)\n",
    "    model = Model(inputs=[sentence_input,target_input],outputs=predictions)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 600 samples, validate on 200 samples\n",
      "Epoch 1/50\n",
      "600/600 [==============================] - 15s 25ms/step - loss: 2.7015 - acc: 0.4283 - val_loss: 1.2362 - val_acc: 0.4700\n",
      "Epoch 2/50\n",
      "600/600 [==============================] - 14s 23ms/step - loss: 1.2683 - acc: 0.4217 - val_loss: 1.0998 - val_acc: 0.4850\n",
      "Epoch 3/50\n",
      "600/600 [==============================] - 14s 23ms/step - loss: 1.0815 - acc: 0.4617 - val_loss: 1.0826 - val_acc: 0.5000\n",
      "Epoch 4/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0785 - acc: 0.4567 - val_loss: 1.0775 - val_acc: 0.5100\n",
      "Epoch 5/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0539 - acc: 0.4700 - val_loss: 1.0722 - val_acc: 0.5150\n",
      "Epoch 6/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0565 - acc: 0.4833 - val_loss: 1.0662 - val_acc: 0.4950\n",
      "Epoch 7/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0435 - acc: 0.4817 - val_loss: 1.0583 - val_acc: 0.5100\n",
      "Epoch 8/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0557 - acc: 0.4817 - val_loss: 1.0519 - val_acc: 0.4950\n",
      "Epoch 9/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0377 - acc: 0.5033 - val_loss: 1.0459 - val_acc: 0.4900\n",
      "Epoch 10/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0461 - acc: 0.4400 - val_loss: 1.0447 - val_acc: 0.5000\n",
      "Epoch 11/50\n",
      "600/600 [==============================] - 14s 23ms/step - loss: 1.0380 - acc: 0.4867 - val_loss: 1.0402 - val_acc: 0.5000\n",
      "Epoch 12/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 1.0463 - acc: 0.4717 - val_loss: 1.0378 - val_acc: 0.5000\n",
      "Epoch 13/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0261 - acc: 0.5383 - val_loss: 1.0346 - val_acc: 0.5050\n",
      "Epoch 14/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0377 - acc: 0.4833 - val_loss: 1.0329 - val_acc: 0.5200\n",
      "Epoch 15/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0184 - acc: 0.5467 - val_loss: 1.0317 - val_acc: 0.5200\n",
      "Epoch 16/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 1.0045 - acc: 0.5167 - val_loss: 1.0255 - val_acc: 0.5100\n",
      "Epoch 17/50\n",
      "600/600 [==============================] - 14s 23ms/step - loss: 1.0045 - acc: 0.5217 - val_loss: 1.0238 - val_acc: 0.5100\n",
      "Epoch 18/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 0.9872 - acc: 0.4950 - val_loss: 1.0197 - val_acc: 0.4950\n",
      "Epoch 19/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 0.9608 - acc: 0.5350 - val_loss: 1.0216 - val_acc: 0.4950\n",
      "Epoch 20/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 0.9824 - acc: 0.5183 - val_loss: 1.0196 - val_acc: 0.4950\n",
      "Epoch 21/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 0.9768 - acc: 0.5167 - val_loss: 1.0236 - val_acc: 0.5200\n",
      "Epoch 22/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 0.9508 - acc: 0.5517 - val_loss: 1.0299 - val_acc: 0.5450\n",
      "Epoch 23/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 0.9555 - acc: 0.5217 - val_loss: 1.0276 - val_acc: 0.5150\n",
      "Epoch 24/50\n",
      "600/600 [==============================] - 13s 22ms/step - loss: 0.9580 - acc: 0.5367 - val_loss: 1.0353 - val_acc: 0.5400\n",
      "Epoch 25/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 0.9161 - acc: 0.5667 - val_loss: 1.0348 - val_acc: 0.5350\n",
      "=====f值：0.5631490787269682=========\n"
     ]
    }
   ],
   "source": [
    "# Train the TAN model with early stopping; report the F-score on test.\n",
    "early_stopping = EarlyStopping(monitor='val_loss',patience=5)\n",
    "# checkpointer = ModelCheckpoint(filepath='tmp/weights.hdf5',verbose=1,save_best_only=True)\n",
    "model = tan()\n",
    "opt = Adam(lr=0.0005)\n",
    "#     opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08)  # the paper's optimizer settings did not train well here; plain Adam is used instead\n",
    "model.compile(loss='categorical_crossentropy',optimizer=opt,\n",
    "             metrics=['accuracy'])\n",
    "model.fit([train_data[0],train_data[1]],train_data[2],\n",
    "          batch_size=batch_size,\n",
    "          epochs=nb_epoch,\n",
    "          verbose=1,\n",
    "          validation_data=([test_data[0],test_data[1]],test_data[2]),\n",
    "          callbacks=[early_stopping],\n",
    "          shuffle=True)\n",
    "y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "y_true = [i.argmax() for i in test_data[2]]\n",
    "print(\"=====f值：{}=========\".format(f1_score(y_true,y_pred)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## changed"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from models.layers import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def softmask(x, mask,axis=-1):\n",
    "    '''Softmax with an optional binary mask, used in attention mechanisms.\n",
    "\n",
    "    Masked positions are zeroed before normalisation; the epsilon keeps\n",
    "    the division safe when an entire row is masked out.\n",
    "    '''\n",
    "    y = K.exp(x)\n",
    "    if mask is not None:\n",
    "        y = y * tf.to_float(mask)\n",
    "    sumx = K.sum(y, axis=axis, keepdims=True) + 1e-6\n",
    "    x = y / sumx\n",
    "    # The former K.relu(x) wrapper was a no-op: x is already non-negative\n",
    "    # (exponentials scaled by a 0/1 mask, divided by a positive sum).\n",
    "    return x\n",
    "class AttentionLayer(Layer):\n",
    "    '''Target-aware attention: scores each sentence position against a\n",
    "    target vector via the bilinear form h.W.t and returns masked-softmax\n",
    "    weights of shape (batch, time_steps).\n",
    "    Inputs: [hidden_states (batch, time, dim), target (batch, dim)].\n",
    "    '''\n",
    "    def __init__(self,**kwargs):\n",
    "        # Fix: Keras expects `supports_masking`; the misspelled\n",
    "        # `support_mask` attribute declared nothing to the framework.\n",
    "        self.supports_masking=True\n",
    "        super(AttentionLayer, self).__init__(**kwargs)\n",
    "        \n",
    "    def build(self,input_shape):\n",
    "        # Square bilinear weight between hidden states and the target.\n",
    "        self.W = self.add_weight(name = 'kernel',\n",
    "                                     shape=(input_shape[0][2],input_shape[0][2]),\n",
    "                                     initializer=initializers.RandomUniform(minval=-0.1,maxval=0.1),trainable=True)\n",
    "        super(AttentionLayer,self).build(input_shape)\n",
    "    \n",
    "    def call(self, x,mask=None):\n",
    "        h = x[0]  # hidden states: (batch, time, dim)\n",
    "        hw = K.dot(h,self.W)\n",
    "        t = x[1]  # target representation: (batch, dim)\n",
    "        # (batch, time, dim) . (batch, dim) -> (batch, time) raw scores.\n",
    "        hwt = dot([hw,t],axes=[2,1])\n",
    "        tanh_result = K.tanh(hwt)\n",
    "        # Fix: guard against mask=None before indexing mask[0], which\n",
    "        # previously raised TypeError when no mask was propagated.\n",
    "        atten = softmask(tanh_result, mask[0] if mask is not None else None)\n",
    "        return atten\n",
    "    \n",
    "    def compute_output_shape(self, input_shape):\n",
    "        return (input_shape[0][0],input_shape[0][1])\n",
    "    \n",
    "    def compute_mask(self, x, mask=None):\n",
    "        # Forward only the sentence mask; the target input carries none.\n",
    "        if mask:\n",
    "            return mask[0]\n",
    "        else:\n",
    "            return None\n",
    "class ClearMaskLayer(Layer):\n",
    "    '''Identity layer that strips the Keras mask from its input, so that\n",
    "    mask-intolerant layers (e.g. a downstream softmax) can follow a\n",
    "    masking layer without complaint. The tensor itself is untouched.\n",
    "    '''\n",
    "    def __init__(self, **kwargs):\n",
    "        self.supports_masking = True\n",
    "        super(ClearMaskLayer, self).__init__(**kwargs)\n",
    "\n",
    "    def compute_mask(self, x, mask=None):\n",
    "        # Deliberately swallow any incoming mask.\n",
    "        return None\n",
    "\n",
    "    def compute_output_shape(self, input_shape):\n",
    "        # Pure pass-through: output shape equals input shape.\n",
    "        return input_shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 改进ATAE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_new():\n",
    "    '''Improved ATAE-style model: attention-pool the target tokens into a\n",
    "    vector, then attend over BiLSTM sentence states with it.\n",
    "    Output: 3-way softmax over stance classes.\n",
    "    '''\n",
    "    sentence_input = Input(shape=(time_steps,),dtype='int32',name='sentence_input')  \n",
    "#     target representation = weighted mean of its word vectors\n",
    "    target_input = Input(shape=(target_nums,),dtype='int32',name='target_input')\n",
    "    embedding_layer = Embedding(embedding_matrix.shape[0],\n",
    "                                embedding_dims,\n",
    "                                weights=[embedding_matrix],\n",
    "                                trainable=False,\n",
    "                                mask_zero=True,\n",
    "                                name='word_embedding')\n",
    "    left_x = embedding_layer(sentence_input)\n",
    "#     left_x = Dropout(0.2)(left_x)\n",
    "    target_x = embedding_layer(target_input)\n",
    "    # Drop the mask so Dense/Reshape below accept the target embeddings.\n",
    "    target_x = ClearMaskLayer()(target_x)\n",
    "    # Learn scalar importance weights over the target tokens ...\n",
    "    target_weight = Dense(1,name='target_weights')(target_x)\n",
    "    target_weight = Reshape((target_nums,))(target_weight)\n",
    "    target_weight = Activation('softmax',name='target_softmax')(target_weight)\n",
    "    # ... and pool them into a single target representation.\n",
    "    target_rep = dot([target_weight,target_x],axes=1)\n",
    "    # Project to the BiLSTM output width (2 * lstm_output_size).\n",
    "    target_rep = Dense(lstm_output_size*2,activation='tanh')(target_rep)\n",
    "    target_rep = Reshape((lstm_output_size*2,))(target_rep)\n",
    "    left_x = Bidirectional(LSTM(lstm_output_size,dropout=0.2,recurrent_dropout=0.2,return_sequences=True))(left_x)  # (time_steps, 2*lstm_output_size)\n",
    "    \n",
    "    att_c = AttentionLayer()([left_x,target_rep])\n",
    "    # Attention-weighted sum of the BiLSTM states: one sentence vector.\n",
    "    cr = dot([att_c,left_x],axes=1,name='attention_mul')\n",
    "    attention_mul = Dropout(0.2)(cr)\n",
    "    attention_mul = Dense(100,activation='tanh')(attention_mul)\n",
    "    predictions = Dense(3,\n",
    "#                         kernel_regularizer=regularizers.l2(0.01),\n",
    "                        activation='softmax')(attention_mul)\n",
    "    model = Model(inputs=[sentence_input,target_input],outputs=predictions)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def create_new2():\n",
    "    '''Variant of create_new that learns a separate, randomly initialised\n",
    "    embedding for target tokens and uses a unidirectional LSTM.\n",
    "    Output: 3-way softmax over stance classes.\n",
    "    '''\n",
    "    sentence_input = Input(shape=(time_steps,),dtype='int32',name='sentence_input')\n",
    "    target_input = Input(shape=(target_nums,),dtype='int32',name='target_input')\n",
    "    embedding_layer = Embedding(embedding_matrix.shape[0],\n",
    "                                embedding_dims,\n",
    "                                weights=[embedding_matrix],\n",
    "                                trainable=True,\n",
    "                                mask_zero=True,\n",
    "                                name='word_embedding')\n",
    "    # Fix: this layer previously reused the name 'word_embedding'; Keras\n",
    "    # requires layer names to be unique within a model and rejects the\n",
    "    # graph otherwise.\n",
    "    embedding_layer2 = Embedding(target_nums+1,\n",
    "                                embedding_dims,\n",
    "                                trainable=True,\n",
    "                                name='target_word_embedding')\n",
    "    left_x = embedding_layer(sentence_input)\n",
    "    target_x = embedding_layer2(target_input)\n",
    "    # Learn scalar importance weights over the target tokens ...\n",
    "    target_weight = Dense(1,name='target_weights')(target_x)\n",
    "    target_weight = Reshape((target_nums,),name='reshape_target_weight')(target_weight)\n",
    "    target_weight = Activation('softmax',name='target_softmax')(target_weight)\n",
    "    # ... and pool them into a single target representation.\n",
    "    target_rep = dot([target_weight,target_x],axes=1)\n",
    "    # Project to the LSTM output width.\n",
    "    target_rep = Dense(lstm_output_size,activation='tanh')(target_rep)\n",
    "    left_x = LSTM(lstm_output_size,dropout=0.2,recurrent_dropout=0.2,return_sequences=True)(left_x)\n",
    "    \n",
    "    att_c = AttentionLayer()([left_x,target_rep])\n",
    "    # Attention-weighted sum of the LSTM states: one sentence vector.\n",
    "    cr = dot([att_c,left_x],axes=1,name='attention_mul')\n",
    "    attention_mul = Dropout(0.2)(cr)\n",
    "    attention_mul = Dense(100,activation='tanh')(attention_mul)\n",
    "    predictions = Dense(3,activation='softmax')(attention_mul)\n",
    "    model = Model(inputs=[sentence_input,target_input],outputs=predictions)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 改进TAN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def target_sentence():\n",
    "    '''Improved TAN: attention weights are computed from the raw word\n",
    "    embeddings against an attention-pooled target vector, then applied\n",
    "    to BiLSTM hidden states. Output: 3-way stance softmax.\n",
    "    '''\n",
    "    sentence_input = Input(shape=(time_steps,),dtype='int32',name='sentence_input')  \n",
    "#     target representation = weighted mean of its word vectors\n",
    "    target_input = Input(shape=(target_nums,),dtype='int32',name='target_input')\n",
    "    embedding_layer = Embedding(embedding_matrix.shape[0],\n",
    "                                embedding_dims,\n",
    "                                weights=[embedding_matrix],\n",
    "                                trainable=True,\n",
    "                                mask_zero=True,\n",
    "                                name='word_embedding')\n",
    "    left_x = embedding_layer(sentence_input)\n",
    "#     left_x = Dropout(0.2)(left_x)\n",
    "    target_x = embedding_layer(target_input)\n",
    "    # Drop the mask so Dense/Reshape below accept the target embeddings.\n",
    "    target_x = ClearMaskLayer()(target_x)\n",
    "    # Learn scalar importance weights over the target tokens ...\n",
    "    target_weight = Dense(1,name='target_weights',use_bias=False)(target_x)\n",
    "    target_weight = Reshape((target_nums,))(target_weight)\n",
    "    target_weight = Activation('softmax',name='target_softmax')(target_weight)\n",
    "    # ... and pool them into a single target representation.\n",
    "    target_rep = dot([target_weight,target_x],axes=1)\n",
    "    target_rep = Dense(300,activation='tanh')(target_rep)\n",
    "    # NOTE: attention is deliberately computed on the raw embeddings here;\n",
    "    # `left_x` is only rebound to the BiLSTM output on the next line.\n",
    "    att_c = AttentionLayer(name='att')([left_x,target_rep])\n",
    "    left_x = Bidirectional(LSTM(lstm_output_size,dropout=0.2,recurrent_dropout=0.2,return_sequences=True))(left_x)  # (time_steps, 2*lstm_output_size)\n",
    "#     h = Lambda(lambda x:tf.slice(x, [0,time_steps-1, 0], [-1, 1, -1]))(h)\n",
    "    # Attention-weighted sum of the BiLSTM states: one sentence vector.\n",
    "    cr = dot([att_c,left_x],axes=1,name='attention_mul') #None*300\n",
    "    attention_mul = Dropout(0.2)(cr)\n",
    "    attention_mul = Dense(100,activation='tanh')(attention_mul)\n",
    "    predictions = Dense(3,\n",
    "#                         kernel_regularizer=regularizers.l2(0.01),\n",
    "                        activation='softmax')(attention_mul)\n",
    "    model = Model(inputs=[sentence_input,target_input],outputs=predictions)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def target_sentence2():\n",
    "    '''Variant of target_sentence that adds the last LSTM hidden state to\n",
    "    the attention-pooled sentence vector before classification.\n",
    "    Output: 3-way softmax over stance classes.\n",
    "    '''\n",
    "    sentence_input = Input(shape=(time_steps,),dtype='int32',name='sentence_input')\n",
    "    target_input = Input(shape=(target_nums,),dtype='int32',name='target_input')\n",
    "    embedding_layer = Embedding(embedding_matrix.shape[0],\n",
    "                                embedding_dims,\n",
    "                                weights=[embedding_matrix],\n",
    "                                trainable=True,\n",
    "                                mask_zero=True,\n",
    "                                name='word_embedding')\n",
    "    sentence_embedding = embedding_layer(sentence_input)\n",
    "    target_x = embedding_layer(target_input)\n",
    "    # Drop the mask so Dense/Reshape below accept the target embeddings.\n",
    "    target_x = ClearMaskLayer()(target_x)\n",
    "    # Attention-pool the target tokens into one vector.\n",
    "    target_weight = Dense(1,name='target_weights',activation='tanh')(target_x)\n",
    "    target_weight = Reshape((target_nums,))(target_weight)\n",
    "    target_weight = Activation('softmax',name='target_softmax')(target_weight)\n",
    "    target_rep = dot([target_weight,target_x],axes=1)\n",
    "    target_rep = Dense(300,activation='tanh',use_bias=True,bias_initializer=initializers.random_uniform(minval=-0.01,maxval=0.01))(target_rep)\n",
    "    # Fix: the next two lines referenced an undefined `left_x`; the word\n",
    "    # embeddings were bound to `sentence_embedding` above (NameError before).\n",
    "    att_c = AttentionLayer(name='att')([sentence_embedding,target_rep])\n",
    "    left_x = LSTM(lstm_output_size,dropout=0.2,recurrent_dropout=0.2,return_sequences=True)(sentence_embedding)\n",
    "    # Last time-step hidden state of the LSTM.\n",
    "    h = Lambda(lambda x:tf.slice(x, [0,time_steps-1, 0], [-1, 1, -1]))(left_x)\n",
    "    h = Flatten()(h)\n",
    "    # Attention-weighted sum of the hidden states ...\n",
    "    cr = dot([att_c,left_x],axes=1,name='attention_mul')\n",
    "    # ... combined with the final hidden state.\n",
    "    c = add([cr,h])\n",
    "    attention_mul = Dropout(0.2)(c)\n",
    "    attention_mul = Dense(100,activation='tanh')(attention_mul)\n",
    "    predictions = Dense(3,activation='softmax')(attention_mul)\n",
    "    model = Model(inputs=[sentence_input,target_input],outputs=predictions)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'EarlyStopping' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-13910b050db2>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mearly_stopping\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mEarlyStopping\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmonitor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'val_loss'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpatience\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[1;31m# checkpointer = ModelCheckpoint(filepath='tmp/weights.hdf5',verbose=1,save_best_only=True)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcreate_new\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mopt\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mAdam\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlr\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.0005\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[1;31m#     opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08) 论文的不不可以，改成普通adam10epoch后会\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'EarlyStopping' is not defined"
     ]
    }
   ],
   "source": [
    "# Train create_new() with early stopping; report the F-score on test.\n",
    "# NOTE(review): the recorded output shows a NameError for EarlyStopping —\n",
    "# this cell assumes the keras.callbacks import cell was executed earlier\n",
    "# in the session; confirm on a fresh kernel.\n",
    "early_stopping = EarlyStopping(monitor='val_loss',patience=5)\n",
    "# checkpointer = ModelCheckpoint(filepath='tmp/weights.hdf5',verbose=1,save_best_only=True)\n",
    "model = create_new()\n",
    "opt = Adam(lr=0.0005)\n",
    "#     opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08)  # the paper's optimizer settings did not train well here; plain Adam is used instead\n",
    "model.compile(loss='categorical_crossentropy',optimizer=opt,\n",
    "             metrics=['accuracy'])\n",
    "# Train on the first 540 samples only.\n",
    "model.fit([train_data[0][:540],train_data[1][:540]],train_data[2][:540],\n",
    "          batch_size=batch_size,\n",
    "          epochs=nb_epoch,\n",
    "          verbose=1,\n",
    "          validation_data=([test_data[0],test_data[1]],test_data[2]),\n",
    "          callbacks=[early_stopping],\n",
    "          shuffle=True)\n",
    "y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "y_true = [i.argmax() for i in test_data[2]]\n",
    "print(\"=====f值：{}=========\".format(f1_score(y_true,y_pred)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 可视化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 186,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "ename": "InvalidArgumentError",
     "evalue": "Node 'word_embedding_98/IsVariableInitialized': Unknown input node 'target_weights_46/kernel'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mInvalidArgumentError\u001b[0m                      Traceback (most recent call last)",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1326\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1327\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1328\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   1296\u001b[0m       \u001b[1;31m# Ensure any changes to the graph are reflected in the runtime.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1297\u001b[1;33m       \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1298\u001b[0m       \u001b[1;32mwith\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mraise_exception_on_not_ok_status\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mstatus\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_extend_graph\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m   1357\u001b[0m           tf_session.TF_ExtendGraph(\n\u001b[1;32m-> 1358\u001b[1;33m               self._session, graph_def.SerializeToString(), status)\n\u001b[0m\u001b[0;32m   1359\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_opened\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\lib\\contextlib.py\u001b[0m in \u001b[0;36m__exit__\u001b[1;34m(self, type, value, traceback)\u001b[0m\n\u001b[0;32m     88\u001b[0m             \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 89\u001b[1;33m                 \u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     90\u001b[0m             \u001b[1;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\u001b[0m in \u001b[0;36mraise_exception_on_not_ok_status\u001b[1;34m()\u001b[0m\n\u001b[0;32m    465\u001b[0m           \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 466\u001b[1;33m           pywrap_tensorflow.TF_GetCode(status))\n\u001b[0m\u001b[0;32m    467\u001b[0m   \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mInvalidArgumentError\u001b[0m: Node 'word_embedding_98/IsVariableInitialized': Unknown input node 'target_weights_46/kernel'",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[1;31mInvalidArgumentError\u001b[0m                      Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-186-d3acb574c0cc>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtarget_sentence\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msummary\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mopt\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mAdam\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlr\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.001\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mopt\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'categorical_crossentropy'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'-----------------------------------------'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-177-5029b8b181f0>\u001b[0m in \u001b[0;36mtarget_sentence\u001b[1;34m()\u001b[0m\n\u001b[0;32m     22\u001b[0m                                 \u001b[0mmask_zero\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     23\u001b[0m                                 name='word_embedding')\n\u001b[1;32m---> 24\u001b[1;33m     \u001b[0mleft_x\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0membedding_layer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msentence_input\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     25\u001b[0m \u001b[1;31m#     left_x = Dropout(0.2)(left_x)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     26\u001b[0m     \u001b[0mtarget_x\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0membedding_layer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtarget_input\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\engine\\topology.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, inputs, **kwargs)\u001b[0m\n\u001b[0;32m    595\u001b[0m                 \u001b[1;31m# Load weights that were specified at layer instantiation.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    596\u001b[0m                 \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_initial_weights\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 597\u001b[1;33m                     \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset_weights\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_initial_weights\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    598\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    599\u001b[0m             \u001b[1;31m# Raise exceptions in case the input is not compatible\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\engine\\topology.py\u001b[0m in \u001b[0;36mset_weights\u001b[1;34m(self, weights)\u001b[0m\n\u001b[0;32m   1209\u001b[0m             \u001b[1;32mreturn\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1210\u001b[0m         \u001b[0mweight_value_tuples\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1211\u001b[1;33m         \u001b[0mparam_values\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mK\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbatch_get_value\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1212\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mpv\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mp\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mw\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mparam_values\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mparams\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mweights\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1213\u001b[0m             \u001b[1;32mif\u001b[0m \u001b[0mpv\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m \u001b[1;33m!=\u001b[0m \u001b[0mw\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36mbatch_get_value\u001b[1;34m(ops)\u001b[0m\n\u001b[0;32m   2318\u001b[0m     \"\"\"\n\u001b[0;32m   2319\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mops\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2320\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mget_session\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mops\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2321\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2322\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36mget_session\u001b[1;34m()\u001b[0m\n\u001b[0;32m    187\u001b[0m                 \u001b[1;31m# not already marked as initialized.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    188\u001b[0m                 is_initialized = session.run(\n\u001b[1;32m--> 189\u001b[1;33m                     [tf.is_variable_initialized(v) for v in candidate_vars])\n\u001b[0m\u001b[0;32m    190\u001b[0m                 \u001b[0muninitialized_vars\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    191\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mflag\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mv\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mis_initialized\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcandidate_vars\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    893\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    894\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 895\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    896\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    897\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1122\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1123\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1124\u001b[1;33m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m   1125\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1126\u001b[0m       \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1319\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1320\u001b[0m       return self._do_call(_run_fn, self._session, feeds, fetches, targets,\n\u001b[1;32m-> 1321\u001b[1;33m                            options, run_metadata)\n\u001b[0m\u001b[0;32m   1322\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1323\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1338\u001b[0m         \u001b[1;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1339\u001b[0m           \u001b[1;32mpass\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1340\u001b[1;33m       \u001b[1;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mop\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1341\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1342\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mInvalidArgumentError\u001b[0m: Node 'word_embedding_98/IsVariableInitialized': Unknown input node 'target_weights_46/kernel'"
     ]
    }
   ],
   "source": [
    "model = target_sentence()\n",
    "model.summary()\n",
    "opt = Adam(lr=0.001)\n",
    "model.compile(optimizer=opt, loss='categorical_crossentropy')\n",
    "print('-----------------------------------------')\n",
    "for i_e in range(20):\n",
    "    iter_data = data_generator(train_data[0][:500],train_data[1][:500],train_data[2][:500],100)\n",
    "    loss,acc,total=0,0,0\n",
    "    for x0,x1,label_iter in iter_data:\n",
    "#         print('[%s]\\t[Train:%s] ' % (time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(time.time()))), end='\\n')\n",
    "        history = model.train_on_batch([x0,x1],label_iter)\n",
    "        num = len(label_iter)\n",
    "        loss,total=loss+history*num,total+num\n",
    "#         print('Iter:%d\\tloss=%.6f' % (i_e, history.history['loss'][0]), end='\\n')\n",
    "    loss=loss/total\n",
    "    v_pred = [i.argmax() for i in model.predict([train_data[0][500:],train_data[1][500:]])]\n",
    "    v_true = [i.argmax() for i in train_data[2][500:]]\n",
    "    y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "    y_true = [i.argmax() for i in test_data[2]]\n",
    "    valid_score = f1_score(v_true,v_pred)\n",
    "    valid_acc=np.mean(np.equal(v_pred,v_true),axis=-1)\n",
    "    test_score = f1_score(y_true,y_pred)\n",
    "    test_acc=np.mean(np.equal(y_pred,y_true),axis=-1)\n",
    "    print('Iter: {} trainloss:{} validscore:{}  validacc:{}  testscore:{}  testacc:{}' .format (i_e+1,loss,valid_score,valid_acc,test_score,test_acc),end='\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 153,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "intermediate_layer_model = Model(inputs=model.input,\n",
    "#                                  outputs=model.get_layer('target_softmax').output\n",
    "                                 outputs=model.get_layer('att').output\n",
    "                                )\n",
    "att_vis = intermediate_layer_model.predict([test_data[0],test_data[1]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 159,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1 1\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "array([0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.        ,\n",
       "       0.        , 0.        , 0.        , 0.        , 0.02503425,\n",
       "       0.02503425, 0.18497944, 0.02503425, 0.18497944, 0.18497944,\n",
       "       0.18497944, 0.18497944], dtype=float32)"
      ]
     },
     "execution_count": 159,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 深圳禁摩限电\n",
    "# 70,131\n",
    "numid = 70\n",
    "print(y_pred[numid],y_true[numid])\n",
    "att_vis[numid]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Const:0' shape=(10, 20, 5) dtype=float32>"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "b = K.constant(np.ones((5,1)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([1., 1., 1., 1., 1.], dtype=float32)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "K.eval(b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "c = K.dot(a,b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Reshape_8:0' shape=(10, 20, 1) dtype=float32>"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Tensor 'Reshape_9:0' shape=(200,) dtype=float32>"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "K.flatten(c)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### train on Batch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "sentence_input (InputLayer)     (None, 101)          0                                            \n",
      "__________________________________________________________________________________________________\n",
      "target_input (InputLayer)       (None, 4)            0                                            \n",
      "__________________________________________________________________________________________________\n",
      "word_embedding (Embedding)      multiple             1570500     sentence_input[0][0]             \n",
      "                                                                 target_input[0][0]               \n",
      "__________________________________________________________________________________________________\n",
      "clear_mask_layer_19 (ClearMaskL (None, 4, 300)       0           word_embedding[1][0]             \n",
      "__________________________________________________________________________________________________\n",
      "target_weights (Dense)          (None, 4, 1)         300         clear_mask_layer_19[0][0]        \n",
      "__________________________________________________________________________________________________\n",
      "reshape_19 (Reshape)            (None, 4)            0           target_weights[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "target_softmax (Activation)     (None, 4)            0           reshape_19[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "dot_34 (Dot)                    (None, 300)          0           target_softmax[0][0]             \n",
      "                                                                 clear_mask_layer_19[0][0]        \n",
      "__________________________________________________________________________________________________\n",
      "dense_55 (Dense)                (None, 300)          90300       dot_34[0][0]                     \n",
      "__________________________________________________________________________________________________\n",
      "att (AttentionLayer)            (None, 101)          90000       word_embedding[0][0]             \n",
      "                                                                 dense_55[0][0]                   \n",
      "__________________________________________________________________________________________________\n",
      "bidirectional_19 (Bidirectional (None, 101, 200)     320800      word_embedding[0][0]             \n",
      "__________________________________________________________________________________________________\n",
      "attention_mul (Dot)             (None, 200)          0           att[0][0]                        \n",
      "                                                                 bidirectional_19[0][0]           \n",
      "__________________________________________________________________________________________________\n",
      "dropout_19 (Dropout)            (None, 200)          0           attention_mul[0][0]              \n",
      "__________________________________________________________________________________________________\n",
      "dense_56 (Dense)                (None, 100)          20100       dropout_19[0][0]                 \n",
      "__________________________________________________________________________________________________\n",
      "dense_57 (Dense)                (None, 3)            303         dense_56[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 2,092,303\n",
      "Trainable params: 2,092,303\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model = target_sentence()\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Model] Model Compile Done.\n",
      "处理：===俄罗斯叙利亚反恐行动===\n",
      "Found 5234 unique tokens.\n",
      "-----------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\sklearn\\metrics\\classification.py:1113: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 1 trainloss:1.0681414246559142 validscore:0.45833333333333337  validacc:0.46  testscore:0.5207093253968254  testacc:0.535\n",
      "Iter: 2 trainloss:0.9930684685707092 validscore:0.5162162162162162  validacc:0.49  testscore:0.5123287671232877  testacc:0.505\n",
      "Iter: 3 trainloss:0.9345085501670838 validscore:0.5869877785765636  validacc:0.55  testscore:0.5343406593406593  testacc:0.52\n",
      "Iter: 4 trainloss:0.871458637714386 validscore:0.6072489601901366  validacc:0.56  testscore:0.5527985337656844  testacc:0.525\n",
      "Iter: 5 trainloss:0.7932446956634521 validscore:0.6498866754145294  validacc:0.6  testscore:0.5148807780186302  testacc:0.49\n",
      "Iter: 6 trainloss:0.7261274814605713 validscore:0.5957142857142856  validacc:0.55  testscore:0.6147383309759548  testacc:0.58\n",
      "Iter: 7 trainloss:0.66246018409729 validscore:0.6851037851037851  validacc:0.64  testscore:0.5690992420236207  testacc:0.535\n",
      "Iter: 8 trainloss:0.5693915486335754 validscore:0.6632962588473205  validacc:0.62  testscore:0.5820105820105819  testacc:0.545\n",
      "Iter: 9 trainloss:0.49543884992599485 validscore:0.6504065040650406  validacc:0.62  testscore:0.5251118699952897  testacc:0.5\n",
      "Iter: 10 trainloss:0.4362851917743683 validscore:0.6738960020904102  validacc:0.64  testscore:0.5760368663594471  testacc:0.55\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.1182305097579956 validscore:0.5071622144792876  validacc:0.5  testscore:0.46604901374775853  testacc:0.465\n",
      "Iter: 2 trainloss:0.9954592704772949 validscore:0.5973389355742296  validacc:0.56  testscore:0.5501679731243001  testacc:0.52\n",
      "Iter: 3 trainloss:0.9374010562896729 validscore:0.6450046253469011  validacc:0.6  testscore:0.5950455005055613  testacc:0.565\n",
      "Iter: 4 trainloss:0.8715696692466736 validscore:0.6524170064065231  validacc:0.61  testscore:0.5978132884777123  testacc:0.57\n",
      "Iter: 5 trainloss:0.8349561214447021 validscore:0.666045428072219  validacc:0.62  testscore:0.6018472012947151  testacc:0.57\n",
      "Iter: 6 trainloss:0.7608198523521423 validscore:0.668697282099344  validacc:0.63  testscore:0.5968170511080959  testacc:0.565\n",
      "Iter: 7 trainloss:0.681762182712555 validscore:0.6448028673835126  validacc:0.61  testscore:0.5929543076386468  testacc:0.56\n",
      "Iter: 8 trainloss:0.6267576336860656 validscore:0.6523058252427185  validacc:0.62  testscore:0.6098128749216583  testacc:0.585\n",
      "Iter: 9 trainloss:0.5407331228256226 validscore:0.6578421578421578  validacc:0.63  testscore:0.5007496251874064  testacc:0.475\n",
      "Iter: 10 trainloss:0.4573064029216766 validscore:0.6727026361172702  validacc:0.63  testscore:0.5910249872514024  testacc:0.56\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0938642024993896 validscore:0.5962380220040222  validacc:0.56  testscore:0.5454331450094162  testacc:0.535\n",
      "Iter: 2 trainloss:0.9937166213989258 validscore:0.5615680615680616  validacc:0.53  testscore:0.5696329813976873  testacc:0.55\n",
      "Iter: 3 trainloss:0.9228653073310852 validscore:0.7093768200349447  validacc:0.66  testscore:0.6170846712372356  testacc:0.59\n",
      "Iter: 4 trainloss:0.8561493873596191 validscore:0.6858474082702388  validacc:0.64  testscore:0.6160636473531809  testacc:0.59\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-16-6bbbb40a40f1>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m     26\u001b[0m             \u001b[1;32mfor\u001b[0m \u001b[0mx0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx1\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mlabel_iter\u001b[0m \u001b[1;32min\u001b[0m \u001b[0miter_data\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     27\u001b[0m         \u001b[1;31m#         print('[%s]\\t[Train:%s] ' % (time.strftime('%m-%d-%Y %H:%M:%S', time.localtime(time.time()))), end='\\n')\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m                 \u001b[0mhistory\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_on_batch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mx0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mx1\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mlabel_iter\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     29\u001b[0m                 \u001b[0mnum\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlabel_iter\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     30\u001b[0m                 \u001b[0mloss\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtotal\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mhistory\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mnum\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtotal\u001b[0m\u001b[1;33m+\u001b[0m\u001b[0mnum\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mtrain_on_batch\u001b[1;34m(self, x, y, sample_weight, class_weight)\u001b[0m\n\u001b[0;32m   1847\u001b[0m             \u001b[0mins\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0my\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0msample_weights\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1848\u001b[0m         \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_make_train_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1849\u001b[1;33m         \u001b[0moutputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mins\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1850\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1851\u001b[0m             \u001b[1;32mreturn\u001b[0m \u001b[0moutputs\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\keras\\backend\\tensorflow_backend.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, inputs)\u001b[0m\n\u001b[0;32m   2473\u001b[0m         \u001b[0msession\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mget_session\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2474\u001b[0m         updated = session.run(fetches=fetches, feed_dict=feed_dict,\n\u001b[1;32m-> 2475\u001b[1;33m                               **self.session_kwargs)\n\u001b[0m\u001b[0;32m   2476\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mupdated\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2477\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    893\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    894\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 895\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    896\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    897\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1122\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1123\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[1;32m-> 1124\u001b[1;33m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[0;32m   1125\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1126\u001b[0m       \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_run\u001b[1;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1319\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1320\u001b[0m       return self._do_call(_run_fn, self._session, feeds, fetches, targets,\n\u001b[1;32m-> 1321\u001b[1;33m                            options, run_metadata)\n\u001b[0m\u001b[0;32m   1322\u001b[0m     \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1323\u001b[0m       \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_do_call\u001b[1;34m(self, fn, *args)\u001b[0m\n\u001b[0;32m   1325\u001b[0m   \u001b[1;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1326\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1327\u001b[1;33m       \u001b[1;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1328\u001b[0m     \u001b[1;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1329\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\program\\Lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[1;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[0;32m   1304\u001b[0m           return tf_session.TF_Run(session, options,\n\u001b[0;32m   1305\u001b[0m                                    \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1306\u001b[1;33m                                    status, run_metadata)\n\u001b[0m\u001b[0;32m   1307\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1308\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msession\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# target_sentence: run each target 3 times, 10 epochs each, and keep/report the best round\n",
    "def data_generator(train_x0,train_x1,label,batch_size):\n",
    "    '''Shuffle the three training arrays in unison and yield mini-batches.\n",
    "\n",
    "    Note: only len(label) // batch_size full batches are yielded per epoch;\n",
    "    the remainder (when len(label) is not divisible by batch_size) is dropped.\n",
    "    '''\n",
    "    idx = np.random.permutation(len(label))\n",
    "    shuffled_train_x0,shuffled_train_x1,shuffled_train_y = train_x0[idx],train_x1[idx],label[idx]\n",
    "    # To keep the remainder use: num_batches = int(np.ceil(len(label) / float(batch_size)))\n",
    "    for i in range(len(label) // batch_size):\n",
    "        sl = slice(i * batch_size, (i + 1) * batch_size)\n",
    "        yield shuffled_train_x0[sl], shuffled_train_x1[sl], shuffled_train_y[sl]\n",
    "\n",
    "print('[Model] Model Compile Done.', end='\\n')\n",
    "data_list=[[train_weibo_russia,test_weibo_russia],\n",
    "          [train_weibo_shenzhen,test_weibo_shenzhen],\n",
    "          [train_weibo_chunjie,test_weibo_chunjie],\n",
    "          [train_weibo_kaifang,test_weibo_kaifang],\n",
    "          [train_weibo_phone,test_weibo_phone]]\n",
    "\n",
    "for tar in data_list:\n",
    "    train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(tar[0],tar[1])\n",
    "    for run in range(3):  # three independent runs per target (renamed from `num`, which was clobbered inside the batch loop)\n",
    "        model = target_sentence()\n",
    "        opt = Adam(lr=0.001)\n",
    "        model.compile(optimizer=opt, loss='categorical_crossentropy')\n",
    "        print('-----------------------------------------')\n",
    "        for i_e in range(10):\n",
    "            # First 500 samples form the training split; the rest is validation.\n",
    "            iter_data = data_generator(train_data[0][:500],train_data[1][:500],train_data[2][:500],100)\n",
    "            loss,total=0,0  # dropped the unused `acc` accumulator\n",
    "            for x0,x1,label_iter in iter_data:\n",
    "                history = model.train_on_batch([x0,x1],label_iter)\n",
    "                batch_n = len(label_iter)  # renamed: previously shadowed the outer loop variable `num`\n",
    "                loss,total=loss+history*batch_n,total+batch_n\n",
    "            loss=loss/total  # mean per-sample training loss for this epoch\n",
    "            v_pred = [i.argmax() for i in model.predict([train_data[0][500:],train_data[1][500:]])]\n",
    "            v_true = [i.argmax() for i in train_data[2][500:]]\n",
    "            y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "            y_true = [i.argmax() for i in test_data[2]]\n",
    "            valid_score = f1_score(v_true,v_pred)\n",
    "            valid_acc=np.mean(np.equal(v_pred,v_true),axis=-1)\n",
    "            test_score = f1_score(y_true,y_pred)\n",
    "            test_acc=np.mean(np.equal(y_pred,y_true),axis=-1)\n",
    "            print('Iter: {} trainloss:{} validscore:{}  validacc:{}  testscore:{}  testacc:{}' .format (i_e+1,loss,valid_score,valid_acc,test_score,test_acc),end='\\n')\n",
    "        del model\n",
    "    print('**********************************************')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 0401"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Model] Model Compile Done.\n",
      "处理：===俄罗斯叙利亚反恐行动===\n",
      "Found 5234 unique tokens.\n",
      "-----------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\sklearn\\metrics\\classification.py:1113: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 1 trainloss:1.079345989227295 validscore:0.4994192799070847  validacc:0.5  testscore:0.3846255579434617  testacc:0.435\n",
      "Iter: 2 trainloss:0.9867422342300415 validscore:0.4455061983471074  validacc:0.45  testscore:0.4892861442157217  testacc:0.485\n",
      "Iter: 3 trainloss:0.924598228931427 validscore:0.6594117647058824  validacc:0.61  testscore:0.5661453334086162  testacc:0.535\n",
      "Iter: 4 trainloss:0.8400780916213989 validscore:0.6917727487034419  validacc:0.64  testscore:0.5691568047337279  testacc:0.54\n",
      "Iter: 5 trainloss:0.7789839029312133 validscore:0.673983060956698  validacc:0.63  testscore:0.6014574152481349  testacc:0.57\n",
      "Iter: 6 trainloss:0.737649428844452 validscore:0.664030612244898  validacc:0.62  testscore:0.5652173913043479  testacc:0.54\n",
      "Iter: 7 trainloss:0.6797959089279175 validscore:0.6820551005212212  validacc:0.64  testscore:0.6096103896103896  testacc:0.58\n",
      "Iter: 8 trainloss:0.603106951713562 validscore:0.6859078590785908  validacc:0.63  testscore:0.6170309653916212  testacc:0.585\n",
      "Iter: 9 trainloss:0.5297667026519776 validscore:0.6881720430107527  validacc:0.65  testscore:0.6069702144904043  testacc:0.575\n",
      "Iter: 10 trainloss:0.44768970012664794 validscore:0.677428530792206  validacc:0.64  testscore:0.5901116427432216  testacc:0.55\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0697617411613465 validscore:0.5431286549707601  validacc:0.52  testscore:0.4790627362055933  testacc:0.485\n",
      "Iter: 2 trainloss:0.978084659576416 validscore:0.6077601410934743  validacc:0.57  testscore:0.5314401622718052  testacc:0.505\n",
      "Iter: 3 trainloss:0.9212903261184693 validscore:0.6010483401281305  validacc:0.56  testscore:0.5393970247185927  testacc:0.515\n",
      "Iter: 4 trainloss:0.8671379327774048 validscore:0.6144507307873645  validacc:0.58  testscore:0.5521642302421197  testacc:0.53\n",
      "Iter: 5 trainloss:0.811592984199524 validscore:0.6180365024454253  validacc:0.58  testscore:0.5178912628104915  testacc:0.495\n",
      "Iter: 6 trainloss:0.748198390007019 validscore:0.6623160892263882  validacc:0.62  testscore:0.533331463667723  testacc:0.51\n",
      "Iter: 7 trainloss:0.6518038868904114 validscore:0.629457743038593  validacc:0.6  testscore:0.5286902286902286  testacc:0.505\n",
      "Iter: 8 trainloss:0.595949113368988 validscore:0.6798679867986799  validacc:0.64  testscore:0.49802190241385247  testacc:0.48\n",
      "Iter: 9 trainloss:0.5176861464977265 validscore:0.6839304257528558  validacc:0.66  testscore:0.5369436798317508  testacc:0.525\n",
      "Iter: 10 trainloss:0.4383012592792511 validscore:0.6515151515151515  validacc:0.63  testscore:0.5177903177903178  testacc:0.495\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0994802832603454 validscore:0.5398860398860399  validacc:0.51  testscore:0.5360125106960549  testacc:0.53\n",
      "Iter: 2 trainloss:0.9918899297714233 validscore:0.5869147659063626  validacc:0.56  testscore:0.5726932243786176  testacc:0.54\n",
      "Iter: 3 trainloss:0.9689088821411133 validscore:0.6024291497975709  validacc:0.56  testscore:0.568200116605125  testacc:0.54\n",
      "Iter: 4 trainloss:0.881926691532135 validscore:0.6072055211135805  validacc:0.57  testscore:0.5975544922913345  testacc:0.57\n",
      "Iter: 5 trainloss:0.8517689228057861 validscore:0.594224924012158  validacc:0.56  testscore:0.6212058212058212  testacc:0.59\n",
      "Iter: 6 trainloss:0.7903738021850586 validscore:0.576509511993383  validacc:0.54  testscore:0.5776556776556776  testacc:0.545\n",
      "Iter: 7 trainloss:0.7059008836746216 validscore:0.6756530074287084  validacc:0.64  testscore:0.6081543544961058  testacc:0.58\n",
      "Iter: 8 trainloss:0.6502672672271729 validscore:0.5803678051110581  validacc:0.55  testscore:0.5338339274235961  testacc:0.52\n",
      "Iter: 9 trainloss:0.582920515537262 validscore:0.6606334841628959  validacc:0.62  testscore:0.5742923017804534  testacc:0.545\n",
      "Iter: 10 trainloss:0.5038073837757111 validscore:0.6644689522896188  validacc:0.62  testscore:0.5965213721589196  testacc:0.565\n",
      "**********************************************\n",
      "处理：===深圳禁摩限电===\n",
      "Found 5763 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0424154639244079 validscore:0.5709677419354839  validacc:0.61  testscore:0.6890019071837253  testacc:0.675\n",
      "Iter: 2 trainloss:0.8626936078071594 validscore:0.6227530164983994  validacc:0.69  testscore:0.7324854975457384  testacc:0.74\n",
      "Iter: 3 trainloss:0.7827513813972473 validscore:0.6230492196878752  validacc:0.65  testscore:0.7806767223807582  testacc:0.74\n",
      "Iter: 4 trainloss:0.7090191960334777 validscore:0.662557781201849  validacc:0.69  testscore:0.7893931667516572  testacc:0.745\n",
      "Iter: 5 trainloss:0.635242509841919 validscore:0.7124836885602437  validacc:0.72  testscore:0.7811559009227781  testacc:0.74\n",
      "Iter: 6 trainloss:0.5494245827198029 validscore:0.7541666666666667  validacc:0.74  testscore:0.7776935707970191  testacc:0.75\n",
      "Iter: 7 trainloss:0.48636962175369264 validscore:0.74798873692679  validacc:0.75  testscore:0.7911662272564528  testacc:0.765\n",
      "Iter: 8 trainloss:0.44504683613777163 validscore:0.6928350287483415  validacc:0.71  testscore:0.807838672954952  testacc:0.78\n",
      "Iter: 9 trainloss:0.3510596752166748 validscore:0.6927939317319848  validacc:0.71  testscore:0.8085148255051168  testacc:0.78\n",
      "Iter: 10 trainloss:0.2959679126739502 validscore:0.7212885154061625  validacc:0.73  testscore:0.8222099590988768  testacc:0.795\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.032001519203186 validscore:0.5681818181818182  validacc:0.62  testscore:0.6959087332808812  testacc:0.69\n",
      "Iter: 2 trainloss:0.8516646265983582 validscore:0.675544794188862  validacc:0.68  testscore:0.7766963292547275  testacc:0.77\n",
      "Iter: 3 trainloss:0.764869999885559 validscore:0.6989374262101533  validacc:0.71  testscore:0.7993859082094376  testacc:0.765\n",
      "Iter: 4 trainloss:0.6848967432975769 validscore:0.7176724137931034  validacc:0.7  testscore:0.7765188633023028  testacc:0.735\n",
      "Iter: 5 trainloss:0.618578577041626 validscore:0.7022497704315886  validacc:0.71  testscore:0.8139222244081579  testacc:0.785\n",
      "Iter: 6 trainloss:0.5527279436588287 validscore:0.762962962962963  validacc:0.74  testscore:0.7743859649122808  testacc:0.745\n",
      "Iter: 7 trainloss:0.5013615131378174 validscore:0.7723970944309928  validacc:0.75  testscore:0.802593843604614  testacc:0.77\n",
      "Iter: 8 trainloss:0.42457444667816163 validscore:0.7639315489249672  validacc:0.73  testscore:0.7983218459694337  testacc:0.78\n",
      "Iter: 9 trainloss:0.3802937626838684 validscore:0.8083441981747066  validacc:0.77  testscore:0.8077733860342555  testacc:0.785\n",
      "Iter: 10 trainloss:0.32206923961639405 validscore:0.814171383954714  validacc:0.78  testscore:0.7818016614745587  testacc:0.76\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0266544938087463 validscore:0.6104896387309737  validacc:0.66  testscore:0.7327135578640906  testacc:0.715\n",
      "Iter: 2 trainloss:0.8459327697753907 validscore:0.6792375486780078  validacc:0.69  testscore:0.7646680804575541  testacc:0.74\n",
      "Iter: 3 trainloss:0.7660321116447448 validscore:0.6858453199916614  validacc:0.7  testscore:0.7960105149219112  testacc:0.76\n",
      "Iter: 4 trainloss:0.6733422160148621 validscore:0.7134387351778655  validacc:0.72  testscore:0.7970402984161928  testacc:0.76\n",
      "Iter: 5 trainloss:0.604808223247528 validscore:0.7666666666666666  validacc:0.76  testscore:0.8194444444444445  testacc:0.785\n",
      "Iter: 6 trainloss:0.564648711681366 validscore:0.7181818181818183  validacc:0.71  testscore:0.8097069768896394  testacc:0.775\n",
      "Iter: 7 trainloss:0.4788209795951843 validscore:0.7431728163435479  validacc:0.72  testscore:0.8281220745178806  testacc:0.79\n",
      "Iter: 8 trainloss:0.4374067187309265 validscore:0.7352564102564103  validacc:0.72  testscore:0.8302257737189244  testacc:0.795\n",
      "Iter: 9 trainloss:0.35567633509635926 validscore:0.7165165165165166  validacc:0.7  testscore:0.8108472400513478  testacc:0.785\n",
      "Iter: 10 trainloss:0.3080208659172058 validscore:0.7238528317486566  validacc:0.71  testscore:0.8234161988773054  testacc:0.795\n",
      "**********************************************\n",
      "处理：===春节放鞭炮===\n",
      "Found 7460 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0186649918556214 validscore:0.6319693094629155  validacc:0.57  testscore:0.7882099685378373  testacc:0.75\n",
      "Iter: 2 trainloss:0.7871648073196411 validscore:0.6675195259897657  validacc:0.63  testscore:0.7964631673800406  testacc:0.755\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 3 trainloss:0.6851068139076233 validscore:0.6472868217054264  validacc:0.59  testscore:0.7883597883597884  testacc:0.755\n",
      "Iter: 4 trainloss:0.6232958197593689 validscore:0.6730333899264289  validacc:0.65  testscore:0.781121281464531  testacc:0.74\n",
      "Iter: 5 trainloss:0.5627202749252319 validscore:0.6587779690189329  validacc:0.64  testscore:0.7564171282234531  testacc:0.71\n",
      "Iter: 6 trainloss:0.4865653574466705 validscore:0.682933789954338  validacc:0.66  testscore:0.7588126159554731  testacc:0.715\n",
      "Iter: 7 trainloss:0.4407429933547974 validscore:0.6856060606060606  validacc:0.67  testscore:0.7591836734693878  testacc:0.715\n",
      "Iter: 8 trainloss:0.3839448928833008 validscore:0.6785714285714286  validacc:0.66  testscore:0.7858318152435799  testacc:0.74\n",
      "Iter: 9 trainloss:0.33065887689590456 validscore:0.6683389074693422  validacc:0.65  testscore:0.7925241251017323  testacc:0.75\n",
      "Iter: 10 trainloss:0.2670221358537674 validscore:0.6767734553775744  validacc:0.65  testscore:0.7859283196239718  testacc:0.74\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.038443660736084 validscore:0.5647435897435897  validacc:0.51  testscore:0.6395529157035433  testacc:0.63\n",
      "Iter: 2 trainloss:0.8356214761734009 validscore:0.6303280805993283  validacc:0.57  testscore:0.7988397790055248  testacc:0.765\n",
      "Iter: 3 trainloss:0.7169528245925904 validscore:0.639036341363822  validacc:0.6  testscore:0.7893031731798329  testacc:0.75\n",
      "Iter: 4 trainloss:0.6403180897235871 validscore:0.635754489413026  validacc:0.6  testscore:0.7909610983981694  testacc:0.755\n",
      "Iter: 5 trainloss:0.5727341294288635 validscore:0.6576576576576576  validacc:0.64  testscore:0.797983027202976  testacc:0.76\n",
      "Iter: 6 trainloss:0.531230115890503 validscore:0.673992673992674  validacc:0.65  testscore:0.7798882681564245  testacc:0.735\n",
      "Iter: 7 trainloss:0.48692049980163576 validscore:0.6730333899264289  validacc:0.65  testscore:0.7990665248729766  testacc:0.755\n",
      "Iter: 8 trainloss:0.40960941314697263 validscore:0.6838487972508591  validacc:0.67  testscore:0.7958333333333334  testacc:0.75\n",
      "Iter: 9 trainloss:0.3642734944820404 validscore:0.6458333333333334  validacc:0.62  testscore:0.7884361101093882  testacc:0.745\n",
      "Iter: 10 trainloss:0.3119662404060364 validscore:0.6635304659498209  validacc:0.65  testscore:0.8207817531834432  testacc:0.78\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:0.9893684983253479 validscore:0.6435185185185186  validacc:0.58  testscore:0.7316861360898674  testacc:0.705\n",
      "Iter: 2 trainloss:0.8135529160499573 validscore:0.6287012987012986  validacc:0.57  testscore:0.7627406253477245  testacc:0.725\n",
      "Iter: 3 trainloss:0.6993507027626038 validscore:0.6685956790123457  validacc:0.6  testscore:0.754088669950739  testacc:0.72\n",
      "Iter: 4 trainloss:0.6418256759643555 validscore:0.672381711855396  validacc:0.62  testscore:0.7853658536585366  testacc:0.75\n",
      "Iter: 5 trainloss:0.56074960231781 validscore:0.6827309236947792  validacc:0.63  testscore:0.7669718330457165  testacc:0.725\n",
      "Iter: 6 trainloss:0.5088004827499389 validscore:0.6825700615174299  validacc:0.64  testscore:0.7596820463781975  testacc:0.715\n",
      "Iter: 7 trainloss:0.4627955675125122 validscore:0.6590567327409433  validacc:0.62  testscore:0.7620208220497082  testacc:0.715\n",
      "Iter: 8 trainloss:0.4050999104976654 validscore:0.7380952380952381  validacc:0.71  testscore:0.7738477072938941  testacc:0.735\n",
      "Iter: 9 trainloss:0.3379832744598389 validscore:0.729235880398671  validacc:0.68  testscore:0.7838169545016218  testacc:0.745\n",
      "Iter: 10 trainloss:0.2996082425117493 validscore:0.7591866376180102  validacc:0.72  testscore:0.7520491803278688  testacc:0.71\n",
      "**********************************************\n",
      "处理：===开放二胎===\n",
      "Found 7357 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.1034247398376464 validscore:0.5338725985844288  validacc:0.56  testscore:0.6047397376216672  testacc:0.625\n",
      "Iter: 2 trainloss:0.9288331508636475 validscore:0.696969696969697  validacc:0.64  testscore:0.7047619047619047  testacc:0.685\n",
      "Iter: 3 trainloss:0.8881831526756286 validscore:0.765914585012087  validacc:0.71  testscore:0.7846097201767305  testacc:0.77\n",
      "Iter: 4 trainloss:0.8259589672088623 validscore:0.743859649122807  validacc:0.7  testscore:0.7837185236288375  testacc:0.77\n",
      "Iter: 5 trainloss:0.7383797526359558 validscore:0.7429288924134285  validacc:0.69  testscore:0.7883107626514612  testacc:0.775\n",
      "Iter: 6 trainloss:0.7091877460479736 validscore:0.7627705627705628  validacc:0.71  testscore:0.8113228420499488  testacc:0.785\n",
      "Iter: 7 trainloss:0.6469255685806274 validscore:0.7633495145631068  validacc:0.71  testscore:0.8008345369924572  testacc:0.785\n",
      "Iter: 8 trainloss:0.5774100542068481 validscore:0.7648970747562296  validacc:0.71  testscore:0.8180766601819234  testacc:0.81\n",
      "Iter: 9 trainloss:0.5136447250843048 validscore:0.7912895927601811  validacc:0.74  testscore:0.82504889178618  testacc:0.805\n",
      "Iter: 10 trainloss:0.44344051480293273 validscore:0.7648970747562296  validacc:0.71  testscore:0.8182367149758454  testacc:0.81\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0829459190368653 validscore:0.6794871794871795  validacc:0.61  testscore:0.7062078272604587  testacc:0.685\n",
      "Iter: 2 trainloss:0.9368091344833374 validscore:0.7437938478143551  validacc:0.7  testscore:0.7401996847083552  testacc:0.73\n",
      "Iter: 3 trainloss:0.8664407372474671 validscore:0.739141810570382  validacc:0.69  testscore:0.7805544709098009  testacc:0.765\n",
      "Iter: 4 trainloss:0.7939025044441224 validscore:0.7363782051282051  validacc:0.68  testscore:0.7950089126559714  testacc:0.77\n",
      "Iter: 5 trainloss:0.7382448315620422 validscore:0.7355096602265156  validacc:0.68  testscore:0.7938144329896908  testacc:0.775\n",
      "Iter: 6 trainloss:0.664513111114502 validscore:0.7312280701754386  validacc:0.66  testscore:0.7897991143752026  testacc:0.765\n",
      "Iter: 7 trainloss:0.5981192231178284 validscore:0.726063829787234  validacc:0.65  testscore:0.820089146296454  testacc:0.775\n",
      "Iter: 8 trainloss:0.5477291584014893 validscore:0.7372099524741405  validacc:0.67  testscore:0.7906043956043955  testacc:0.76\n",
      "Iter: 9 trainloss:0.46785285472869875 validscore:0.7423469387755103  validacc:0.67  testscore:0.8164690457188462  testacc:0.77\n",
      "Iter: 10 trainloss:0.4043764412403107 validscore:0.7414285714285714  validacc:0.67  testscore:0.8162451583504216  testacc:0.775\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0975040674209595 validscore:0.674871639987919  validacc:0.61  testscore:0.7117963241243788  testacc:0.685\n",
      "Iter: 2 trainloss:0.9513358235359192 validscore:0.7300431499460626  validacc:0.68  testscore:0.7322789732953072  testacc:0.725\n",
      "Iter: 3 trainloss:0.8538786888122558 validscore:0.7461006374610064  validacc:0.7  testscore:0.7561340206185567  testacc:0.745\n",
      "Iter: 4 trainloss:0.7781737446784973 validscore:0.7472497249724972  validacc:0.7  testscore:0.7670247046186895  testacc:0.745\n",
      "Iter: 5 trainloss:0.7150965094566345 validscore:0.7583540458436895  validacc:0.71  testscore:0.7935054773082942  testacc:0.78\n",
      "Iter: 6 trainloss:0.6457596898078919 validscore:0.7294481981981982  validacc:0.67  testscore:0.7756892230576442  testacc:0.75\n",
      "Iter: 7 trainloss:0.6208247065544128 validscore:0.7294481981981982  validacc:0.67  testscore:0.8030795689678338  testacc:0.785\n",
      "Iter: 8 trainloss:0.5354012072086334 validscore:0.7291788002329644  validacc:0.67  testscore:0.8261525204653166  testacc:0.81\n",
      "Iter: 9 trainloss:0.4834821820259094 validscore:0.723511322337154  validacc:0.67  testscore:0.8202102157477411  testacc:0.81\n",
      "Iter: 10 trainloss:0.43611316084861756 validscore:0.7078260869565216  validacc:0.65  testscore:0.7959762051742523  testacc:0.765\n",
      "**********************************************\n",
      "处理：===iphone se===\n",
      "Found 4780 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0899099349975585 validscore:0.5127694859038142  validacc:0.5  testscore:0.46107358691788125  testacc:0.45\n",
      "Iter: 2 trainloss:0.9520479917526246 validscore:0.5540691192865106  validacc:0.53  testscore:0.4647786980140707  testacc:0.45\n",
      "Iter: 3 trainloss:0.8688048720359802 validscore:0.5728802794537948  validacc:0.55  testscore:0.5182724252491695  testacc:0.475\n",
      "Iter: 4 trainloss:0.8005387663841248 validscore:0.4940222897669706  validacc:0.52  testscore:0.5143866258535437  testacc:0.445\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 5 trainloss:0.7501904368400574 validscore:0.5732165206508135  validacc:0.55  testscore:0.5307051782461618  testacc:0.48\n",
      "Iter: 6 trainloss:0.6732471346855163 validscore:0.5485638699924414  validacc:0.54  testscore:0.5095269693809841  testacc:0.44\n",
      "Iter: 7 trainloss:0.6127538919448853 validscore:0.5642403517342451  validacc:0.55  testscore:0.5227963525835866  testacc:0.475\n",
      "Iter: 8 trainloss:0.5559304654598236 validscore:0.5378787878787878  validacc:0.52  testscore:0.5193036711891461  testacc:0.465\n",
      "Iter: 9 trainloss:0.48858230710029604 validscore:0.5163003663003664  validacc:0.53  testscore:0.5808810649851113  testacc:0.51\n",
      "Iter: 10 trainloss:0.40646747350692747 validscore:0.5692307692307692  validacc:0.56  testscore:0.5313156788566624  testacc:0.475\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.1288400053977967 validscore:0.5046528274874732  validacc:0.53  testscore:0.46673558542034876  testacc:0.45\n",
      "Iter: 2 trainloss:0.9619181752204895 validscore:0.5125  validacc:0.51  testscore:0.4764285714285714  testacc:0.465\n",
      "Iter: 3 trainloss:0.8607441663742066 validscore:0.5260869565217391  validacc:0.54  testscore:0.4955715524064373  testacc:0.465\n",
      "Iter: 4 trainloss:0.8385169982910157 validscore:0.5204274041483343  validacc:0.51  testscore:0.5436105476673427  testacc:0.52\n",
      "Iter: 5 trainloss:0.7614733457565308 validscore:0.5177570093457944  validacc:0.53  testscore:0.5373510156118853  testacc:0.475\n",
      "Iter: 6 trainloss:0.7223566174507141 validscore:0.49234693877551017  validacc:0.5  testscore:0.5661361626878868  testacc:0.505\n",
      "Iter: 7 trainloss:0.6515438199043274 validscore:0.5807907845579079  validacc:0.54  testscore:0.5683060109289617  testacc:0.505\n",
      "Iter: 8 trainloss:0.5919605553150177 validscore:0.5291529152915291  validacc:0.54  testscore:0.5564814814814815  testacc:0.485\n",
      "Iter: 9 trainloss:0.5412874341011047 validscore:0.5609679446888749  validacc:0.55  testscore:0.586781206171108  testacc:0.525\n",
      "Iter: 10 trainloss:0.46980578303337095 validscore:0.5069306930693069  validacc:0.54  testscore:0.5783972125435539  testacc:0.495\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0869163036346436 validscore:0.5408748114630467  validacc:0.5  testscore:0.4747405953287128  testacc:0.48\n",
      "Iter: 2 trainloss:0.96306893825531 validscore:0.5082125603864734  validacc:0.52  testscore:0.48004079254079257  testacc:0.44\n",
      "Iter: 3 trainloss:0.893024230003357 validscore:0.5500614250614251  validacc:0.52  testscore:0.507563025210084  testacc:0.495\n",
      "Iter: 4 trainloss:0.8123085021972656 validscore:0.518762088974855  validacc:0.53  testscore:0.5414811149595911  testacc:0.485\n",
      "Iter: 5 trainloss:0.7798158764839173 validscore:0.5009973404255319  validacc:0.49  testscore:0.5211427572951248  testacc:0.49\n",
      "Iter: 6 trainloss:0.7115189075469971 validscore:0.572684458398744  validacc:0.56  testscore:0.5534285714285715  testacc:0.515\n",
      "Iter: 7 trainloss:0.631514060497284 validscore:0.5267857142857143  validacc:0.55  testscore:0.5038314176245211  testacc:0.45\n",
      "Iter: 8 trainloss:0.5787579774856567 validscore:0.5446969696969697  validacc:0.58  testscore:0.5675287356321839  testacc:0.495\n",
      "Iter: 9 trainloss:0.5239997923374176 validscore:0.5099999999999999  validacc:0.54  testscore:0.558408215661104  testacc:0.49\n",
      "Iter: 10 trainloss:0.45260931849479674 validscore:0.6115569823434992  validacc:0.61  testscore:0.5191869300911853  testacc:0.475\n",
      "**********************************************\n"
     ]
    }
   ],
   "source": [
    "# target_sentence: run each target 3 times, 10 epochs each, and keep/report the best round\n",
    "def data_generator(train_x0,train_x1,label,batch_size):\n",
    "    '''Shuffle the three training arrays in unison and yield mini-batches.\n",
    "\n",
    "    Note: only len(label) // batch_size full batches are yielded per epoch;\n",
    "    the remainder (when len(label) is not divisible by batch_size) is dropped.\n",
    "    '''\n",
    "    idx = np.random.permutation(len(label))\n",
    "    shuffled_train_x0,shuffled_train_x1,shuffled_train_y = train_x0[idx],train_x1[idx],label[idx]\n",
    "    # To keep the remainder use: num_batches = int(np.ceil(len(label) / float(batch_size)))\n",
    "    for i in range(len(label) // batch_size):\n",
    "        sl = slice(i * batch_size, (i + 1) * batch_size)\n",
    "        yield shuffled_train_x0[sl], shuffled_train_x1[sl], shuffled_train_y[sl]\n",
    "\n",
    "print('[Model] Model Compile Done.', end='\\n')\n",
    "data_list=[[train_weibo_russia,test_weibo_russia],\n",
    "          [train_weibo_shenzhen,test_weibo_shenzhen],\n",
    "          [train_weibo_chunjie,test_weibo_chunjie],\n",
    "          [train_weibo_kaifang,test_weibo_kaifang],\n",
    "          [train_weibo_phone,test_weibo_phone]]\n",
    "\n",
    "for tar in data_list:\n",
    "    train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(tar[0],tar[1])\n",
    "    for run in range(3):  # three independent runs per target (renamed from `num`, which was clobbered inside the batch loop)\n",
    "        model = target_sentence()\n",
    "        opt = Adam(lr=0.001)\n",
    "        model.compile(optimizer=opt, loss='categorical_crossentropy')\n",
    "        print('-----------------------------------------')\n",
    "        for i_e in range(10):\n",
    "            # First 500 samples form the training split; the rest is validation.\n",
    "            iter_data = data_generator(train_data[0][:500],train_data[1][:500],train_data[2][:500],100)\n",
    "            loss,total=0,0  # dropped the unused `acc` accumulator\n",
    "            for x0,x1,label_iter in iter_data:\n",
    "                history = model.train_on_batch([x0,x1],label_iter)\n",
    "                batch_n = len(label_iter)  # renamed: previously shadowed the outer loop variable `num`\n",
    "                loss,total=loss+history*batch_n,total+batch_n\n",
    "            loss=loss/total  # mean per-sample training loss for this epoch\n",
    "            v_pred = [i.argmax() for i in model.predict([train_data[0][500:],train_data[1][500:]])]\n",
    "            v_true = [i.argmax() for i in train_data[2][500:]]\n",
    "            y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "            y_true = [i.argmax() for i in test_data[2]]\n",
    "            valid_score = f1_score(v_true,v_pred)\n",
    "            valid_acc=np.mean(np.equal(v_pred,v_true),axis=-1)\n",
    "            test_score = f1_score(y_true,y_pred)\n",
    "            test_acc=np.mean(np.equal(y_pred,y_true),axis=-1)\n",
    "            print('Iter: {} trainloss:{} validscore:{}  validacc:{}  testscore:{}  testacc:{}' .format (i_e+1,loss,valid_score,valid_acc,test_score,test_acc),end='\\n')\n",
    "        del model\n",
    "    print('**********************************************')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Model] Model Compile Done.\n",
      "处理：===俄罗斯叙利亚反恐行动===\n",
      "Found 5234 unique tokens.\n",
      "-----------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\sklearn\\metrics\\classification.py:1113: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 1 trainloss:1.073236310482025 validscore:0.46352941176470586  validacc:0.51  testscore:0.4621410777500983  testacc:0.5\n",
      "Iter: 2 trainloss:0.9784598469734191 validscore:0.5998082913970765  validacc:0.56  testscore:0.5565862708719851  testacc:0.54\n",
      "Iter: 3 trainloss:0.9264754056930542 validscore:0.6258823529411766  validacc:0.58  testscore:0.582271390561546  testacc:0.55\n",
      "Iter: 4 trainloss:0.8743132710456848 validscore:0.6486676016830294  validacc:0.6  testscore:0.5978835978835979  testacc:0.565\n",
      "Iter: 5 trainloss:0.8106701850891114 validscore:0.6373307543520309  validacc:0.59  testscore:0.5637804187889077  testacc:0.535\n",
      "Iter: 6 trainloss:0.7685769915580749 validscore:0.5934782608695652  validacc:0.55  testscore:0.5593780258586319  testacc:0.535\n",
      "Iter: 7 trainloss:0.6842665314674378 validscore:0.6588939857288482  validacc:0.62  testscore:0.553595911753708  testacc:0.54\n",
      "Iter: 8 trainloss:0.6109845638275146 validscore:0.6518971848225213  validacc:0.61  testscore:0.5450837710161087  testacc:0.52\n",
      "Iter: 9 trainloss:0.5219684898853302 validscore:0.6819783374657444  validacc:0.64  testscore:0.5513784461152882  testacc:0.525\n",
      "Iter: 10 trainloss:0.44259980916976926 validscore:0.670071105365223  validacc:0.65  testscore:0.551246193051153  testacc:0.52\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0838064670562744 validscore:0.506578947368421  validacc:0.49  testscore:0.4619392553056297  testacc:0.49\n",
      "Iter: 2 trainloss:1.00119446516037 validscore:0.5369075369075369  validacc:0.54  testscore:0.5286498288681695  testacc:0.515\n",
      "Iter: 3 trainloss:0.9628291487693786 validscore:0.6061253561253561  validacc:0.57  testscore:0.5503921858813083  testacc:0.535\n",
      "Iter: 4 trainloss:0.8898890376091003 validscore:0.6004876349703936  validacc:0.56  testscore:0.5879045949180374  testacc:0.56\n",
      "Iter: 5 trainloss:0.8420621275901794 validscore:0.6521739130434783  validacc:0.61  testscore:0.5698807932263814  testacc:0.54\n",
      "Iter: 6 trainloss:0.764355731010437 validscore:0.6666666666666667  validacc:0.62  testscore:0.5688429217840982  testacc:0.53\n",
      "Iter: 7 trainloss:0.7046448349952698 validscore:0.6993526732198514  validacc:0.65  testscore:0.6024623496087724  testacc:0.57\n",
      "Iter: 8 trainloss:0.6247093081474304 validscore:0.7106782106782106  validacc:0.66  testscore:0.5924813800818394  testacc:0.555\n",
      "Iter: 9 trainloss:0.5349543690681458 validscore:0.6999010390895596  validacc:0.64  testscore:0.5753273096072284  testacc:0.54\n",
      "Iter: 10 trainloss:0.4671634495258331 validscore:0.6973490427098674  validacc:0.64  testscore:0.5477370132830786  testacc:0.515\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0980166912078857 validscore:0.4675030028026158  validacc:0.49  testscore:0.5171367636898861  testacc:0.52\n",
      "Iter: 2 trainloss:0.9847490787506104 validscore:0.5586798522578339  validacc:0.53  testscore:0.5727689803167086  testacc:0.555\n",
      "Iter: 3 trainloss:0.9297596216201782 validscore:0.6509803921568627  validacc:0.6  testscore:0.6184502474825055  testacc:0.58\n",
      "Iter: 4 trainloss:0.8792092561721802 validscore:0.6330532212885154  validacc:0.59  testscore:0.631811032468262  testacc:0.6\n",
      "Iter: 5 trainloss:0.8227450847625732 validscore:0.6097883597883598  validacc:0.57  testscore:0.5741379310344827  testacc:0.555\n",
      "Iter: 6 trainloss:0.7476096987724304 validscore:0.6406025824964132  validacc:0.59  testscore:0.5609734923023497  testacc:0.53\n",
      "Iter: 7 trainloss:0.6935790061950684 validscore:0.6454545454545455  validacc:0.61  testscore:0.5953459889773423  testacc:0.575\n",
      "Iter: 8 trainloss:0.6078333616256714 validscore:0.6593406593406594  validacc:0.61  testscore:0.5846273291925466  testacc:0.55\n",
      "Iter: 9 trainloss:0.5424538850784302 validscore:0.6385388825251368  validacc:0.6  testscore:0.5841539160733222  testacc:0.55\n",
      "Iter: 10 trainloss:0.4368204355239868 validscore:0.6458333333333333  validacc:0.6  testscore:0.5652254249815225  testacc:0.53\n",
      "**********************************************\n",
      "处理：===深圳禁摩限电===\n",
      "Found 5763 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0197321891784668 validscore:0.5788950715421303  validacc:0.65  testscore:0.6967046589304776  testacc:0.7\n",
      "Iter: 2 trainloss:0.8823965311050415 validscore:0.6727272727272727  validacc:0.71  testscore:0.7746313903817004  testacc:0.765\n",
      "Iter: 3 trainloss:0.7785052537918091 validscore:0.723813184701134  validacc:0.74  testscore:0.7832950191570881  testacc:0.75\n",
      "Iter: 4 trainloss:0.7055797696113586 validscore:0.7479674796747967  validacc:0.73  testscore:0.8092228919567048  testacc:0.79\n",
      "Iter: 5 trainloss:0.627422297000885 validscore:0.7981799797775531  validacc:0.77  testscore:0.7968009478672987  testacc:0.77\n",
      "Iter: 6 trainloss:0.5566807150840759 validscore:0.7375474648201921  validacc:0.73  testscore:0.7825163398692812  testacc:0.765\n",
      "Iter: 7 trainloss:0.47962483763694763 validscore:0.7611179110567114  validacc:0.73  testscore:0.7887730553327987  testacc:0.77\n",
      "Iter: 8 trainloss:0.4173633694648743 validscore:0.7786774628879891  validacc:0.74  testscore:0.8145396222709567  testacc:0.795\n",
      "Iter: 9 trainloss:0.3649707317352295 validscore:0.8064021468276787  validacc:0.76  testscore:0.7896461543309072  testacc:0.77\n",
      "Iter: 10 trainloss:0.3018660247325897 validscore:0.7139639639639639  validacc:0.72  testscore:0.785502763885233  testacc:0.775\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:0.9826559543609619 validscore:0.5405797101449276  validacc:0.61  testscore:0.715764705882353  testacc:0.715\n",
      "Iter: 2 trainloss:0.8440631747245788 validscore:0.6098191214470284  validacc:0.67  testscore:0.7492076615681411  testacc:0.74\n",
      "Iter: 3 trainloss:0.755743408203125 validscore:0.7216905901116428  validacc:0.73  testscore:0.792583857442348  testacc:0.77\n",
      "Iter: 4 trainloss:0.6828086853027344 validscore:0.7204345152695224  validacc:0.72  testscore:0.7931188276521186  testacc:0.765\n",
      "Iter: 5 trainloss:0.6091127038002014 validscore:0.7226386806596702  validacc:0.71  testscore:0.7839262187088275  testacc:0.75\n",
      "Iter: 6 trainloss:0.5567222774028778 validscore:0.7366946778711484  validacc:0.73  testscore:0.8027210884353742  testacc:0.77\n",
      "Iter: 7 trainloss:0.4530222415924072 validscore:0.7401129943502825  validacc:0.73  testscore:0.7969924812030076  testacc:0.765\n",
      "Iter: 8 trainloss:0.41836808919906615 validscore:0.7553235053235052  validacc:0.74  testscore:0.7907511497189575  testacc:0.755\n",
      "Iter: 9 trainloss:0.34213619828224184 validscore:0.736764705882353  validacc:0.73  testscore:0.8050481988838153  testacc:0.775\n",
      "Iter: 10 trainloss:0.2894519090652466 validscore:0.7209618874773139  validacc:0.72  testscore:0.8019740497434389  testacc:0.77\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0216598510742188 validscore:0.5726997349488829  validacc:0.63  testscore:0.704868154158215  testacc:0.705\n",
      "Iter: 2 trainloss:0.8536266088485718 validscore:0.6903010033444816  validacc:0.69  testscore:0.7624943863475974  testacc:0.73\n",
      "Iter: 3 trainloss:0.759924852848053 validscore:0.6687077702702702  validacc:0.7  testscore:0.7717319594106374  testacc:0.745\n",
      "Iter: 4 trainloss:0.6796455264091492 validscore:0.701639344262295  validacc:0.7  testscore:0.789922480620155  testacc:0.755\n",
      "Iter: 5 trainloss:0.6126033663749695 validscore:0.7521367521367521  validacc:0.75  testscore:0.8020117455954017  testacc:0.76\n",
      "Iter: 6 trainloss:0.5482382357120514 validscore:0.7094017094017093  validacc:0.72  testscore:0.8104573123146559  testacc:0.77\n",
      "Iter: 7 trainloss:0.5005090773105622 validscore:0.6975102192493496  validacc:0.7  testscore:0.8043043043043043  testacc:0.765\n",
      "Iter: 8 trainloss:0.4231479823589325 validscore:0.7546218487394958  validacc:0.75  testscore:0.805218525766471  testacc:0.775\n",
      "Iter: 9 trainloss:0.3568063795566559 validscore:0.7408356326369727  validacc:0.74  testscore:0.8166197183098591  testacc:0.785\n",
      "Iter: 10 trainloss:0.33397871255874634 validscore:0.7301724137931034  validacc:0.73  testscore:0.805622009569378  testacc:0.775\n",
      "**********************************************\n",
      "处理：===春节放鞭炮===\n",
      "Found 7460 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.01771240234375 validscore:0.6107323232323232  validacc:0.56  testscore:0.7283209951289319  testacc:0.695\n",
      "Iter: 2 trainloss:0.8662043452262879 validscore:0.6176698527512271  validacc:0.56  testscore:0.7617155158138765  testacc:0.73\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 3 trainloss:0.7449966549873352 validscore:0.647317388493859  validacc:0.58  testscore:0.7580183276059564  testacc:0.725\n",
      "Iter: 4 trainloss:0.6720381140708923 validscore:0.6657267367341708  validacc:0.6  testscore:0.7739016617345778  testacc:0.74\n",
      "Iter: 5 trainloss:0.578732681274414 validscore:0.6668796319959112  validacc:0.6  testscore:0.7884473039913455  testacc:0.75\n",
      "Iter: 6 trainloss:0.5183629155158996 validscore:0.6666666666666667  validacc:0.62  testscore:0.7912749706227967  testacc:0.745\n",
      "Iter: 7 trainloss:0.4576001107692719 validscore:0.6973856209150326  validacc:0.64  testscore:0.7468832309043021  testacc:0.705\n",
      "Iter: 8 trainloss:0.42271748185157776 validscore:0.6885638297872341  validacc:0.64  testscore:0.7902587079057668  testacc:0.745\n",
      "Iter: 9 trainloss:0.3664019227027893 validscore:0.6848238482384823  validacc:0.63  testscore:0.7805229142185663  testacc:0.73\n",
      "Iter: 10 trainloss:0.3177389144897461 validscore:0.7207792207792207  validacc:0.66  testscore:0.7670294943820226  testacc:0.72\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.000724232196808 validscore:0.6181818181818182  validacc:0.56  testscore:0.6624129600682108  testacc:0.645\n",
      "Iter: 2 trainloss:0.8348707795143128 validscore:0.6073059360730594  validacc:0.57  testscore:0.7357293868921776  testacc:0.705\n",
      "Iter: 3 trainloss:0.7616477012634277 validscore:0.66983016983017  validacc:0.61  testscore:0.7360603037357277  testacc:0.71\n",
      "Iter: 4 trainloss:0.6430902004241943 validscore:0.6303280805993283  validacc:0.57  testscore:0.7381838691483362  testacc:0.705\n",
      "Iter: 5 trainloss:0.6060686349868775 validscore:0.6390957086774063  validacc:0.6  testscore:0.7092031755366068  testacc:0.67\n",
      "Iter: 6 trainloss:0.5325690329074859 validscore:0.6592238171185538  validacc:0.61  testscore:0.7781332872131904  testacc:0.735\n",
      "Iter: 7 trainloss:0.47265495657920836 validscore:0.7089481521591985  validacc:0.66  testscore:0.7660994919059436  testacc:0.725\n",
      "Iter: 8 trainloss:0.4154430627822876 validscore:0.7080932425353588  validacc:0.65  testscore:0.77757331799885  testacc:0.735\n",
      "Iter: 9 trainloss:0.3492245614528656 validscore:0.694377990430622  validacc:0.65  testscore:0.7782851649342066  testacc:0.73\n",
      "Iter: 10 trainloss:0.2914363771677017 validscore:0.6860902255639096  validacc:0.64  testscore:0.8027187473113655  testacc:0.76\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.039928376674652 validscore:0.6275  validacc:0.57  testscore:0.7703270450613724  testacc:0.735\n",
      "Iter: 2 trainloss:0.8515795946121216 validscore:0.6456400742115027  validacc:0.6  testscore:0.763584313071514  testacc:0.725\n",
      "Iter: 3 trainloss:0.7281490445137024 validscore:0.6715538847117793  validacc:0.61  testscore:0.7328666277745367  testacc:0.705\n",
      "Iter: 4 trainloss:0.6499995350837707 validscore:0.6841491841491842  validacc:0.64  testscore:0.7869361059869193  testacc:0.75\n",
      "Iter: 5 trainloss:0.5970519065856934 validscore:0.646873207114171  validacc:0.6  testscore:0.7109756097560975  testacc:0.68\n",
      "Iter: 6 trainloss:0.5352381408214569 validscore:0.6760683760683761  validacc:0.64  testscore:0.7804347826086957  testacc:0.745\n",
      "Iter: 7 trainloss:0.4686095416545868 validscore:0.681793842034806  validacc:0.64  testscore:0.797838327250092  testacc:0.76\n",
      "Iter: 8 trainloss:0.4149263083934784 validscore:0.6769400110071546  validacc:0.64  testscore:0.8070195760454071  testacc:0.77\n",
      "Iter: 9 trainloss:0.34791340827941897 validscore:0.6900867586403072  validacc:0.65  testscore:0.8109055842903481  testacc:0.77\n",
      "Iter: 10 trainloss:0.2977989077568054 validscore:0.7024957458876915  validacc:0.65  testscore:0.8106718097074896  testacc:0.77\n",
      "**********************************************\n",
      "处理：===开放二胎===\n",
      "Found 7357 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0672861099243165 validscore:0.7372099524741405  validacc:0.68  testscore:0.7169256729172904  testacc:0.695\n",
      "Iter: 2 trainloss:0.9382245659828186 validscore:0.7131193693693694  validacc:0.67  testscore:0.7592782017992499  testacc:0.745\n",
      "Iter: 3 trainloss:0.8387697458267211 validscore:0.7198879551820728  validacc:0.67  testscore:0.764102564102564  testacc:0.75\n",
      "Iter: 4 trainloss:0.7811736702919007 validscore:0.7619654488813368  validacc:0.71  testscore:0.804269328802039  testacc:0.79\n",
      "Iter: 5 trainloss:0.7076759457588195 validscore:0.7273469387755103  validacc:0.68  testscore:0.7872474416163737  testacc:0.775\n",
      "Iter: 6 trainloss:0.6551117062568664 validscore:0.7374681393372982  validacc:0.69  testscore:0.8174311926605504  testacc:0.8\n",
      "Iter: 7 trainloss:0.5846018671989441 validscore:0.7167203435319377  validacc:0.67  testscore:0.8033466092860388  testacc:0.785\n",
      "Iter: 8 trainloss:0.5666738092899323 validscore:0.7834931705899447  validacc:0.73  testscore:0.8041666666666667  testacc:0.795\n",
      "Iter: 9 trainloss:0.5160125732421875 validscore:0.710814606741573  validacc:0.65  testscore:0.7835762736325589  testacc:0.745\n",
      "Iter: 10 trainloss:0.4443694591522217 validscore:0.7512376237623763  validacc:0.7  testscore:0.8065594092758779  testacc:0.79\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0768916368484498 validscore:0.3982905982905983  validacc:0.41  testscore:0.48333333333333334  testacc:0.545\n",
      "Iter: 2 trainloss:0.970881462097168 validscore:0.6677404509894156  validacc:0.65  testscore:0.7309465020576131  testacc:0.73\n",
      "Iter: 3 trainloss:0.9030907034873963 validscore:0.7503607503607503  validacc:0.7  testscore:0.7705352411234765  testacc:0.76\n",
      "Iter: 4 trainloss:0.8037645697593689 validscore:0.6851851851851852  validacc:0.64  testscore:0.7805944055944056  testacc:0.76\n",
      "Iter: 5 trainloss:0.7654634714126587 validscore:0.753238170763944  validacc:0.7  testscore:0.794665404040404  testacc:0.78\n",
      "Iter: 6 trainloss:0.6800679564476013 validscore:0.7633053221288515  validacc:0.71  testscore:0.8044258373205742  testacc:0.78\n",
      "Iter: 7 trainloss:0.6231831789016724 validscore:0.71613250740641  validacc:0.66  testscore:0.8152016627640821  testacc:0.8\n",
      "Iter: 8 trainloss:0.5591488420963288 validscore:0.7664884135472372  validacc:0.7  testscore:0.819811320754717  testacc:0.79\n",
      "Iter: 9 trainloss:0.48514131307601926 validscore:0.7320574162679425  validacc:0.67  testscore:0.839404685149366  testacc:0.815\n",
      "Iter: 10 trainloss:0.42834911346435545 validscore:0.7445652173913044  validacc:0.68  testscore:0.8127355157795573  testacc:0.76\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0614961624145507 validscore:0.6944444444444444  validacc:0.65  testscore:0.6963150213697487  testacc:0.685\n",
      "Iter: 2 trainloss:0.9190587878227234 validscore:0.7412915851272017  validacc:0.69  testscore:0.7482383419689119  testacc:0.735\n",
      "Iter: 3 trainloss:0.8447206854820252 validscore:0.7492811173490346  validacc:0.7  testscore:0.7738521524627995  testacc:0.755\n",
      "Iter: 4 trainloss:0.7737921118736267 validscore:0.7866666666666666  validacc:0.74  testscore:0.8032349246231156  testacc:0.785\n",
      "Iter: 5 trainloss:0.7146578192710876 validscore:0.7885969664138679  validacc:0.73  testscore:0.8031780330979235  testacc:0.785\n",
      "Iter: 6 trainloss:0.6507607460021972 validscore:0.7697368421052632  validacc:0.7  testscore:0.7941520467836257  testacc:0.77\n",
      "Iter: 7 trainloss:0.62102130651474 validscore:0.7869471413160734  validacc:0.73  testscore:0.7937772312956082  testacc:0.77\n",
      "Iter: 8 trainloss:0.5333166062831879 validscore:0.7742257742257741  validacc:0.72  testscore:0.7886347886347886  testacc:0.755\n",
      "Iter: 9 trainloss:0.4684687376022339 validscore:0.8  validacc:0.74  testscore:0.8094196714316193  testacc:0.79\n",
      "Iter: 10 trainloss:0.39210265278816225 validscore:0.7511441647597255  validacc:0.69  testscore:0.7926523297491039  testacc:0.735\n",
      "**********************************************\n",
      "处理：===iphone se===\n",
      "Found 4780 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.092439091205597 validscore:0.5214285714285715  validacc:0.51  testscore:0.4982859447469248  testacc:0.46\n",
      "Iter: 2 trainloss:0.9640267729759217 validscore:0.4472135687088958  validacc:0.47  testscore:0.48543616341564255  testacc:0.455\n",
      "Iter: 3 trainloss:0.8783814072608948 validscore:0.5686809026200212  validacc:0.53  testscore:0.5118466898954704  testacc:0.49\n",
      "Iter: 4 trainloss:0.8054379105567933 validscore:0.4607768469154607  validacc:0.49  testscore:0.47319259445421796  testacc:0.43\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 5 trainloss:0.7418405175209045 validscore:0.5775206087507927  validacc:0.55  testscore:0.519237684286228  testacc:0.49\n",
      "Iter: 6 trainloss:0.6956810235977173 validscore:0.45276402640264024  validacc:0.47  testscore:0.45001478852410526  testacc:0.4\n",
      "Iter: 7 trainloss:0.6173457384109498 validscore:0.5762981841350748  validacc:0.55  testscore:0.5294586734215218  testacc:0.485\n",
      "Iter: 8 trainloss:0.5384639859199524 validscore:0.5043478260869565  validacc:0.52  testscore:0.4852941176470588  testacc:0.43\n",
      "Iter: 9 trainloss:0.49021286964416505 validscore:0.5023076923076922  validacc:0.52  testscore:0.5388695987654321  testacc:0.465\n",
      "Iter: 10 trainloss:0.4153408706188202 validscore:0.5454545454545454  validacc:0.51  testscore:0.5360694775178596  testacc:0.475\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0826433658599854 validscore:0.4507144149032375  validacc:0.46  testscore:0.391547367820681  testacc:0.41\n",
      "Iter: 2 trainloss:0.9817468762397766 validscore:0.4761176270610233  validacc:0.5  testscore:0.5108543417366946  testacc:0.475\n",
      "Iter: 3 trainloss:0.8741128206253052 validscore:0.5335428122545169  validacc:0.52  testscore:0.507937806873977  testacc:0.48\n",
      "Iter: 4 trainloss:0.8287734746932983 validscore:0.5265700483091786  validacc:0.53  testscore:0.4658119658119658  testacc:0.425\n",
      "Iter: 5 trainloss:0.7541538834571838 validscore:0.5168350168350169  validacc:0.53  testscore:0.4740000000000001  testacc:0.415\n",
      "Iter: 6 trainloss:0.7076744794845581 validscore:0.534991974317817  validacc:0.53  testscore:0.5396825396825398  testacc:0.495\n",
      "Iter: 7 trainloss:0.6568525791168213 validscore:0.5433583959899749  validacc:0.54  testscore:0.538122029294791  testacc:0.47\n",
      "Iter: 8 trainloss:0.5818024635314941 validscore:0.5416355489171023  validacc:0.56  testscore:0.4990661963062876  testacc:0.43\n",
      "Iter: 9 trainloss:0.519366329908371 validscore:0.5997812158149711  validacc:0.58  testscore:0.5321380888804119  testacc:0.51\n",
      "Iter: 10 trainloss:0.4793384730815887 validscore:0.4875204582651391  validacc:0.52  testscore:0.4868449662970211  testacc:0.415\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0783041715621948 validscore:0.47878787878787876  validacc:0.51  testscore:0.41461988304093567  testacc:0.38\n",
      "Iter: 2 trainloss:0.9559062004089356 validscore:0.6045836516424752  validacc:0.57  testscore:0.5368731563421829  testacc:0.52\n",
      "Iter: 3 trainloss:0.84882572889328 validscore:0.5740740740740741  validacc:0.58  testscore:0.5283749279301541  testacc:0.485\n",
      "Iter: 4 trainloss:0.8061496019363403 validscore:0.5511647972389991  validacc:0.54  testscore:0.48578438447106365  testacc:0.435\n",
      "Iter: 5 trainloss:0.7462653517723083 validscore:0.5590329524755754  validacc:0.56  testscore:0.5129490392648288  testacc:0.455\n",
      "Iter: 6 trainloss:0.7076753854751587 validscore:0.6029209621993127  validacc:0.58  testscore:0.5127272727272727  testacc:0.45\n",
      "Iter: 7 trainloss:0.6281341433525085 validscore:0.5664607237422772  validacc:0.57  testscore:0.5315524944154877  testacc:0.465\n",
      "Iter: 8 trainloss:0.5591665863990783 validscore:0.5862524785194976  validacc:0.57  testscore:0.5225313479623824  testacc:0.47\n",
      "Iter: 9 trainloss:0.5073441505432129 validscore:0.5588235294117647  validacc:0.56  testscore:0.5015684907633322  testacc:0.44\n",
      "Iter: 10 trainloss:0.45821415185928344 validscore:0.6125490196078431  validacc:0.59  testscore:0.5089668615984405  testacc:0.475\n",
      "**********************************************\n"
     ]
    }
   ],
   "source": [
    "# For each stance target, train the `target_sentence` model 3 independent times\n",
    "# and log per-epoch validation / test F1 and accuracy (best run kept manually).\n",
    "def data_generator(train_x0,train_x1,label,batch_size):\n",
    "    \"\"\"Shuffle the training set and yield aligned (x0, x1, y) mini-batches.\n",
    "\n",
    "    Yields len(label) // batch_size full batches; the trailing partial batch\n",
    "    is dropped (use np.ceil on the batch count if full coverage is needed).\n",
    "    \"\"\"\n",
    "    idx = np.random.permutation(len(label))\n",
    "    shuffled_train_x0,shuffled_train_x1,shuffled_train_y = train_x0[idx],train_x1[idx],label[idx]\n",
    "    for i in range(len(label)//batch_size):\n",
    "        sl = slice(i * batch_size, (i + 1) * batch_size)\n",
    "        yield shuffled_train_x0[sl], shuffled_train_x1[sl], shuffled_train_y[sl]\n",
    "\n",
    "print('[Model] Model Compile Done.', end='\\n')\n",
    "# (train, test) weibo pairs, one per stance target\n",
    "data_list=[[train_weibo_russia,test_weibo_russia],\n",
    "          [train_weibo_shenzhen,test_weibo_shenzhen],\n",
    "          [train_weibo_chunjie,test_weibo_chunjie],\n",
    "          [train_weibo_kaifang,test_weibo_kaifang],\n",
    "          [train_weibo_phone,test_weibo_phone]]\n",
    "\n",
    "for tar in data_list:\n",
    "    train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(tar[0],tar[1])\n",
    "    for run in range(3):  # three independent runs per target\n",
    "        model = target_sentence()\n",
    "        opt = Adam(lr=0.001)\n",
    "        model.compile(optimizer=opt, loss='categorical_crossentropy')\n",
    "        print('-----------------------------------------')\n",
    "        for i_e in range(10):  # 10 epochs; first 500 samples train, the rest validate\n",
    "            iter_data = data_generator(train_data[0][:500],train_data[1][:500],train_data[2][:500],100)\n",
    "            loss,total=0,0  # fixed: removed unused `acc` accumulator\n",
    "            for x0,x1,label_iter in iter_data:\n",
    "                batch_loss = model.train_on_batch([x0,x1],label_iter)\n",
    "                batch_n = len(label_iter)  # fixed: no longer shadows the outer run counter (`num`)\n",
    "                loss,total=loss+batch_loss*batch_n,total+batch_n\n",
    "            loss=loss/total  # sample-weighted mean training loss for this epoch\n",
    "            v_pred = [i.argmax() for i in model.predict([train_data[0][500:],train_data[1][500:]])]\n",
    "            v_true = [i.argmax() for i in train_data[2][500:]]\n",
    "            y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "            y_true = [i.argmax() for i in test_data[2]]\n",
    "            valid_score = f1_score(v_true,v_pred)\n",
    "            valid_acc=np.mean(np.equal(v_pred,v_true),axis=-1)\n",
    "            test_score = f1_score(y_true,y_pred)\n",
    "            test_acc=np.mean(np.equal(y_pred,y_true),axis=-1)\n",
    "            print('Iter: {} trainloss:{} validscore:{}  validacc:{}  testscore:{}  testacc:{}' .format (i_e+1,loss,valid_score,valid_acc,test_score,test_acc),end='\\n')\n",
    "        del model  # free the Keras model before the next run\n",
    "    print('**********************************************')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Model] Model Compile Done.\n",
      "处理：===俄罗斯叙利亚反恐行动===\n",
      "Found 5234 unique tokens.\n",
      "-----------------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\program\\Lib\\site-packages\\sklearn\\metrics\\classification.py:1113: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples.\n",
      "  'precision', 'predicted', average, warn_for)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 1 trainloss:1.070013701915741 validscore:0.5618226600985221  validacc:0.54  testscore:0.44553913519430766  testacc:0.48\n",
      "Iter: 2 trainloss:0.976631474494934 validscore:0.5368421052631579  validacc:0.51  testscore:0.5942028985507246  testacc:0.575\n",
      "Iter: 3 trainloss:0.9498296737670898 validscore:0.6330532212885154  validacc:0.59  testscore:0.5540123456790124  testacc:0.53\n",
      "Iter: 4 trainloss:0.8843255162239074 validscore:0.6436781609195402  validacc:0.6  testscore:0.5800713728114196  testacc:0.55\n",
      "Iter: 5 trainloss:0.8256144881248474 validscore:0.702664796633941  validacc:0.66  testscore:0.5879465041476215  testacc:0.555\n",
      "Iter: 6 trainloss:0.7800886511802674 validscore:0.66996699669967  validacc:0.63  testscore:0.569551282051282  testacc:0.545\n",
      "Iter: 7 trainloss:0.7060319185256958 validscore:0.6960227272727273  validacc:0.66  testscore:0.5661547377571718  testacc:0.54\n",
      "Iter: 8 trainloss:0.6747113585472106 validscore:0.6875  validacc:0.65  testscore:0.5694444444444444  testacc:0.54\n",
      "Iter: 9 trainloss:0.6055742740631104 validscore:0.7334074898034854  validacc:0.71  testscore:0.5804511278195488  testacc:0.54\n",
      "Iter: 10 trainloss:0.5374927163124085 validscore:0.7224348685022843  validacc:0.69  testscore:0.5901793590120552  testacc:0.555\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.080208134651184 validscore:0.5776576576576576  validacc:0.55  testscore:0.5277522935779816  testacc:0.51\n",
      "Iter: 2 trainloss:1.002446961402893 validscore:0.6500907441016335  validacc:0.59  testscore:0.5238849765258217  testacc:0.5\n",
      "Iter: 3 trainloss:0.953640878200531 validscore:0.6749083165739973  validacc:0.63  testscore:0.4832618025751073  testacc:0.475\n",
      "Iter: 4 trainloss:0.8738948225975036 validscore:0.6524170064065231  validacc:0.61  testscore:0.5808477237048666  testacc:0.55\n",
      "Iter: 5 trainloss:0.8416441440582275 validscore:0.7081402257872844  validacc:0.65  testscore:0.5445663696530384  testacc:0.515\n",
      "Iter: 6 trainloss:0.7870946049690246 validscore:0.6813186813186813  validacc:0.64  testscore:0.5693440997308401  testacc:0.535\n",
      "Iter: 7 trainloss:0.7217764616012573 validscore:0.667224880382775  validacc:0.63  testscore:0.5840568271507498  testacc:0.55\n",
      "Iter: 8 trainloss:0.6392979264259339 validscore:0.6832384566729918  validacc:0.64  testscore:0.5781099183948053  testacc:0.55\n",
      "Iter: 9 trainloss:0.56841641664505 validscore:0.6925093632958801  validacc:0.65  testscore:0.5561497326203209  testacc:0.525\n",
      "Iter: 10 trainloss:0.5105343818664551 validscore:0.6336441336441336  validacc:0.59  testscore:0.5540058479532164  testacc:0.525\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0752193331718445 validscore:0.5013701894435839  validacc:0.48  testscore:0.5849462365591398  testacc:0.57\n",
      "Iter: 2 trainloss:1.003699290752411 validscore:0.598820754716981  validacc:0.56  testscore:0.5360135470497212  testacc:0.525\n",
      "Iter: 3 trainloss:0.9385280966758728 validscore:0.5920305803312869  validacc:0.55  testscore:0.5893518131975175  testacc:0.56\n",
      "Iter: 4 trainloss:0.8967010617256165 validscore:0.6340080971659919  validacc:0.59  testscore:0.5789823744595943  testacc:0.55\n",
      "Iter: 5 trainloss:0.8489534139633179 validscore:0.6015406162464986  validacc:0.56  testscore:0.5545528660987884  testacc:0.53\n",
      "Iter: 6 trainloss:0.7817536473274231 validscore:0.6799272286234082  validacc:0.63  testscore:0.6305856465809073  testacc:0.6\n",
      "Iter: 7 trainloss:0.7533880233764648 validscore:0.6258503401360543  validacc:0.6  testscore:0.5776863231480438  testacc:0.55\n",
      "Iter: 8 trainloss:0.6516231060028076 validscore:0.626714135021097  validacc:0.59  testscore:0.5437696806117858  testacc:0.515\n",
      "Iter: 9 trainloss:0.5915429830551148 validscore:0.7321428571428572  validacc:0.7  testscore:0.5877331002331002  testacc:0.555\n",
      "Iter: 10 trainloss:0.5153262495994568 validscore:0.6228070175438597  validacc:0.6  testscore:0.542027417027417  testacc:0.51\n",
      "**********************************************\n",
      "处理：===深圳禁摩限电===\n",
      "Found 5763 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.061322021484375 validscore:0.5965818052341108  validacc:0.64  testscore:0.6604833219877467  testacc:0.67\n",
      "Iter: 2 trainloss:0.8885849237442016 validscore:0.6330492424242424  validacc:0.69  testscore:0.7475505524286011  testacc:0.745\n",
      "Iter: 3 trainloss:0.7866272807121277 validscore:0.6651305683563749  validacc:0.69  testscore:0.7592249784030607  testacc:0.725\n",
      "Iter: 4 trainloss:0.7243599414825439 validscore:0.7096370463078849  validacc:0.71  testscore:0.788353065145974  testacc:0.755\n",
      "Iter: 5 trainloss:0.6373480439186097 validscore:0.7485430606518455  validacc:0.73  testscore:0.7989391260419297  testacc:0.76\n",
      "Iter: 6 trainloss:0.5938818335533143 validscore:0.7283613445378152  validacc:0.72  testscore:0.8177355207556549  testacc:0.78\n",
      "Iter: 7 trainloss:0.5332540273666382 validscore:0.7026862026862026  validacc:0.69  testscore:0.806924882629108  testacc:0.77\n",
      "Iter: 8 trainloss:0.4716121435165405 validscore:0.6987219777917453  validacc:0.69  testscore:0.7928098391674552  testacc:0.76\n",
      "Iter: 9 trainloss:0.41061217784881593 validscore:0.683695652173913  validacc:0.7  testscore:0.795518565941101  testacc:0.775\n",
      "Iter: 10 trainloss:0.36748132705688474 validscore:0.6798069187449718  validacc:0.7  testscore:0.8097432521395656  testacc:0.79\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:0.966338849067688 validscore:0.6002162162162162  validacc:0.65  testscore:0.7031025919059778  testacc:0.705\n",
      "Iter: 2 trainloss:0.8430842995643616 validscore:0.6806722689075629  validacc:0.69  testscore:0.7497397188964081  testacc:0.735\n",
      "Iter: 3 trainloss:0.7338346838951111 validscore:0.6937596302003082  validacc:0.7  testscore:0.7850320472539902  testacc:0.76\n",
      "Iter: 4 trainloss:0.6598303556442261 validscore:0.743018018018018  validacc:0.72  testscore:0.7927479445015417  testacc:0.77\n",
      "Iter: 5 trainloss:0.585420286655426 validscore:0.7233050847457627  validacc:0.71  testscore:0.7879069767441861  testacc:0.75\n",
      "Iter: 6 trainloss:0.5302520036697388 validscore:0.7074561403508772  validacc:0.69  testscore:0.7960918665144017  testacc:0.765\n",
      "Iter: 7 trainloss:0.4819106459617615 validscore:0.7233050847457627  validacc:0.71  testscore:0.8020741671904462  testacc:0.77\n",
      "Iter: 8 trainloss:0.39849222302436826 validscore:0.6898305084745763  validacc:0.68  testscore:0.8054100119941923  testacc:0.78\n",
      "Iter: 9 trainloss:0.3658779501914978 validscore:0.697522816166884  validacc:0.69  testscore:0.8100183069250677  testacc:0.785\n",
      "Iter: 10 trainloss:0.32525813579559326 validscore:0.6888888888888889  validacc:0.68  testscore:0.7898351648351648  testacc:0.765\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0754868745803834 validscore:0.5251832215811634  validacc:0.56  testscore:0.6976671265521133  testacc:0.68\n",
      "Iter: 2 trainloss:0.9259826898574829 validscore:0.5909090909090908  validacc:0.66  testscore:0.6662210338680927  testacc:0.69\n",
      "Iter: 3 trainloss:0.8433547854423523 validscore:0.6369878183831672  validacc:0.66  testscore:0.7625175315568022  testacc:0.755\n",
      "Iter: 4 trainloss:0.7671825408935546 validscore:0.6743232157506153  validacc:0.66  testscore:0.7755004195133645  testacc:0.735\n",
      "Iter: 5 trainloss:0.6790933251380921 validscore:0.7201612903225807  validacc:0.71  testscore:0.7901186790505677  testacc:0.76\n",
      "Iter: 6 trainloss:0.606820034980774 validscore:0.7243170450173144  validacc:0.72  testscore:0.7830746832698323  testacc:0.745\n",
      "Iter: 7 trainloss:0.559201717376709 validscore:0.7640914465904611  validacc:0.75  testscore:0.8177355207556549  testacc:0.785\n",
      "Iter: 8 trainloss:0.50791996717453 validscore:0.7086956521739131  validacc:0.72  testscore:0.8093522906793049  testacc:0.78\n",
      "Iter: 9 trainloss:0.4610369324684143 validscore:0.7146042363433668  validacc:0.71  testscore:0.8167357801572074  testacc:0.79\n",
      "Iter: 10 trainloss:0.3965378224849701 validscore:0.7233169129720854  validacc:0.71  testscore:0.8146478873239436  testacc:0.79\n",
      "**********************************************\n",
      "处理：===春节放鞭炮===\n",
      "Found 7460 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.053802728652954 validscore:0.6447709593777009  validacc:0.58  testscore:0.7546204620462047  testacc:0.72\n",
      "Iter: 2 trainloss:0.8737374424934388 validscore:0.6594315245478036  validacc:0.59  testscore:0.7516305693920797  testacc:0.72\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 3 trainloss:0.7308433651924133 validscore:0.677868523229348  validacc:0.63  testscore:0.7819095477386935  testacc:0.75\n",
      "Iter: 4 trainloss:0.6627344012260437 validscore:0.6440613026819924  validacc:0.58  testscore:0.7707070707070707  testacc:0.735\n",
      "Iter: 5 trainloss:0.5960227370262146 validscore:0.6702898550724637  validacc:0.63  testscore:0.7553075197054255  testacc:0.72\n",
      "Iter: 6 trainloss:0.554318243265152 validscore:0.6461260258728614  validacc:0.61  testscore:0.7417202692595364  testacc:0.695\n",
      "Iter: 7 trainloss:0.5033153474330903 validscore:0.6781609195402298  validacc:0.64  testscore:0.7553117380703587  testacc:0.72\n",
      "Iter: 8 trainloss:0.45387200713157655 validscore:0.6571150368618723  validacc:0.62  testscore:0.7840507290844803  testacc:0.74\n",
      "Iter: 9 trainloss:0.4100741267204285 validscore:0.6940753045404209  validacc:0.65  testscore:0.7774430372341982  testacc:0.74\n",
      "Iter: 10 trainloss:0.3696815609931946 validscore:0.6506579839913174  validacc:0.61  testscore:0.7883841288096608  testacc:0.745\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0349555015563965 validscore:0.6555555555555557  validacc:0.59  testscore:0.7163374725671583  testacc:0.69\n",
      "Iter: 2 trainloss:0.8396568655967712 validscore:0.6636363636363636  validacc:0.61  testscore:0.7819095477386935  testacc:0.75\n",
      "Iter: 3 trainloss:0.7418828964233398 validscore:0.681398252184769  validacc:0.62  testscore:0.7762004520646122  testacc:0.745\n",
      "Iter: 4 trainloss:0.6777264595031738 validscore:0.6326608784473953  validacc:0.57  testscore:0.7772177419354839  testacc:0.745\n",
      "Iter: 5 trainloss:0.6174795031547546 validscore:0.614977614977615  validacc:0.58  testscore:0.7693482987600635  testacc:0.73\n",
      "Iter: 6 trainloss:0.5684604823589325 validscore:0.6364458220328038  validacc:0.58  testscore:0.7524448650663913  testacc:0.715\n",
      "Iter: 7 trainloss:0.5243871688842774 validscore:0.6669387755102041  validacc:0.63  testscore:0.7922979797979798  testacc:0.75\n",
      "Iter: 8 trainloss:0.48293226957321167 validscore:0.6930555555555555  validacc:0.64  testscore:0.7620535714285714  testacc:0.725\n",
      "Iter: 9 trainloss:0.4236362636089325 validscore:0.6542451542451542  validacc:0.62  testscore:0.7919928876652615  testacc:0.75\n",
      "Iter: 10 trainloss:0.37888389825820923 validscore:0.6656593406593406  validacc:0.61  testscore:0.7413638194379899  testacc:0.695\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0180551886558533 validscore:0.5885167464114833  validacc:0.54  testscore:0.7329754644332649  testacc:0.7\n",
      "Iter: 2 trainloss:0.8701126098632812 validscore:0.6705368289637953  validacc:0.61  testscore:0.7801047120418849  testacc:0.745\n",
      "Iter: 3 trainloss:0.7530861973762513 validscore:0.643198390746794  validacc:0.59  testscore:0.775911286780852  testacc:0.74\n",
      "Iter: 4 trainloss:0.659739351272583 validscore:0.6441521961184883  validacc:0.58  testscore:0.7580960357342267  testacc:0.725\n",
      "Iter: 5 trainloss:0.6064046740531921 validscore:0.6521138436032052  validacc:0.63  testscore:0.7750000000000001  testacc:0.735\n",
      "Iter: 6 trainloss:0.5478213965892792 validscore:0.6706827309236949  validacc:0.64  testscore:0.7544390637610977  testacc:0.72\n",
      "Iter: 7 trainloss:0.4913883447647095 validscore:0.6691535539880629  validacc:0.63  testscore:0.7853239656518345  testacc:0.745\n",
      "Iter: 8 trainloss:0.4272115886211395 validscore:0.736863711001642  validacc:0.69  testscore:0.7711762223427987  testacc:0.735\n",
      "Iter: 9 trainloss:0.37936554551124574 validscore:0.6595394736842105  validacc:0.63  testscore:0.7919782266150981  testacc:0.755\n",
      "Iter: 10 trainloss:0.34076281189918517 validscore:0.7130339539978094  validacc:0.67  testscore:0.7617357001972387  testacc:0.725\n",
      "**********************************************\n",
      "处理：===开放二胎===\n",
      "Found 7357 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.1053884029388428 validscore:0.6626995645863571  validacc:0.61  testscore:0.7125470878006375  testacc:0.7\n",
      "Iter: 2 trainloss:0.9571278929710388 validscore:0.7036775106082036  validacc:0.65  testscore:0.7276612276612278  testacc:0.715\n",
      "Iter: 3 trainloss:0.8744378805160522 validscore:0.7326892109500805  validacc:0.68  testscore:0.7562736956129266  testacc:0.745\n",
      "Iter: 4 trainloss:0.7984149217605591 validscore:0.7184002184002184  validacc:0.67  testscore:0.7604701646635251  testacc:0.745\n",
      "Iter: 5 trainloss:0.7455118656158447 validscore:0.7464285714285714  validacc:0.7  testscore:0.7583277210796249  testacc:0.75\n",
      "Iter: 6 trainloss:0.6766274690628051 validscore:0.7485994397759104  validacc:0.7  testscore:0.794736703458813  testacc:0.78\n",
      "Iter: 7 trainloss:0.6220750927925109 validscore:0.7626346313173156  validacc:0.71  testscore:0.8175346670900815  testacc:0.8\n",
      "Iter: 8 trainloss:0.554591715335846 validscore:0.7344463470319634  validacc:0.68  testscore:0.7917415670931585  testacc:0.775\n",
      "Iter: 9 trainloss:0.5205156624317169 validscore:0.7695526695526695  validacc:0.72  testscore:0.8013468013468013  testacc:0.78\n",
      "Iter: 10 trainloss:0.45785909295082095 validscore:0.743421052631579  validacc:0.69  testscore:0.7903597280105286  testacc:0.765\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0693169593811036 validscore:0.6628787878787878  validacc:0.63  testscore:0.6953242835595776  testacc:0.68\n",
      "Iter: 2 trainloss:0.9679859042167663 validscore:0.7492811173490346  validacc:0.7  testscore:0.7342860854878525  testacc:0.725\n",
      "Iter: 3 trainloss:0.8993527293205261 validscore:0.7362376237623762  validacc:0.69  testscore:0.7712974422442245  testacc:0.76\n",
      "Iter: 4 trainloss:0.8275563716888428 validscore:0.7253531381971748  validacc:0.68  testscore:0.7714827352742044  testacc:0.75\n",
      "Iter: 5 trainloss:0.7539522767066955 validscore:0.751003478726251  validacc:0.7  testscore:0.7898266747323848  testacc:0.775\n",
      "Iter: 6 trainloss:0.705721628665924 validscore:0.7527950310559006  validacc:0.71  testscore:0.7969944226399505  testacc:0.785\n",
      "Iter: 7 trainloss:0.6464183330535889 validscore:0.7493719521205853  validacc:0.7  testscore:0.7940062388591801  testacc:0.77\n",
      "Iter: 8 trainloss:0.5842834353446961 validscore:0.7583540458436895  validacc:0.71  testscore:0.8224574247970715  testacc:0.81\n",
      "Iter: 9 trainloss:0.5620791137218475 validscore:0.7583540458436895  validacc:0.71  testscore:0.7908959692077409  testacc:0.775\n",
      "Iter: 10 trainloss:0.5094327330589294 validscore:0.7568115942028986  validacc:0.7  testscore:0.7937960844139333  testacc:0.76\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0841437578201294 validscore:0.7119612470500559  validacc:0.65  testscore:0.7295918367346939  testacc:0.715\n",
      "Iter: 2 trainloss:0.9591117620468139 validscore:0.7693661971830986  validacc:0.71  testscore:0.7332384566729917  testacc:0.715\n",
      "Iter: 3 trainloss:0.8915959000587463 validscore:0.7867924528301887  validacc:0.73  testscore:0.7388278388278389  testacc:0.725\n",
      "Iter: 4 trainloss:0.8244381666183471 validscore:0.7447150120417447  validacc:0.69  testscore:0.7625777583760778  testacc:0.735\n",
      "Iter: 5 trainloss:0.7676726222038269 validscore:0.7495709570957095  validacc:0.7  testscore:0.7594828507319158  testacc:0.74\n",
      "Iter: 6 trainloss:0.740706467628479 validscore:0.7630727762803236  validacc:0.71  testscore:0.8032018326629552  testacc:0.79\n",
      "Iter: 7 trainloss:0.6758957982063294 validscore:0.7497866287339972  validacc:0.69  testscore:0.773968253968254  testacc:0.72\n",
      "Iter: 8 trainloss:0.6202455759048462 validscore:0.7848423876592892  validacc:0.73  testscore:0.8190338860850758  testacc:0.805\n",
      "Iter: 9 trainloss:0.5643558263778686 validscore:0.7758094961521709  validacc:0.72  testscore:0.8175824175824176  testacc:0.76\n",
      "Iter: 10 trainloss:0.5134211778640747 validscore:0.7703296703296703  validacc:0.71  testscore:0.8236718965042664  testacc:0.795\n",
      "**********************************************\n",
      "处理：===iphone se===\n",
      "Found 4780 unique tokens.\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.1099436283111572 validscore:0.4949494949494949  validacc:0.47  testscore:0.4490937569926158  testacc:0.47\n",
      "Iter: 2 trainloss:0.974218738079071 validscore:0.4797979797979798  validacc:0.52  testscore:0.41825136612021857  testacc:0.36\n",
      "Iter: 3 trainloss:0.9031685948371887 validscore:0.5380952380952381  validacc:0.54  testscore:0.4976174863387978  testacc:0.465\n",
      "Iter: 4 trainloss:0.8337954163551331 validscore:0.528344671201814  validacc:0.53  testscore:0.5153125562961629  testacc:0.475\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter: 5 trainloss:0.7912734031677247 validscore:0.5403389830508475  validacc:0.55  testscore:0.5084396099024755  testacc:0.455\n",
      "Iter: 6 trainloss:0.7609133958816529 validscore:0.5366666666666666  validacc:0.55  testscore:0.5166550522648083  testacc:0.465\n",
      "Iter: 7 trainloss:0.7006391525268555 validscore:0.5550667970022809  validacc:0.56  testscore:0.5279115391474942  testacc:0.48\n",
      "Iter: 8 trainloss:0.6350942015647888 validscore:0.5768849206349207  validacc:0.57  testscore:0.5591831064184496  testacc:0.505\n",
      "Iter: 9 trainloss:0.6207467794418335 validscore:0.5887177770338132  validacc:0.59  testscore:0.5607406363080972  testacc:0.505\n",
      "Iter: 10 trainloss:0.5571300148963928 validscore:0.5601478494623655  validacc:0.55  testscore:0.5435155208531708  testacc:0.485\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.093523120880127 validscore:0.41808731808731814  validacc:0.44  testscore:0.3690058479532164  testacc:0.365\n",
      "Iter: 2 trainloss:0.9718220710754395 validscore:0.5099999999999999  validacc:0.51  testscore:0.5132801638984414  testacc:0.48\n",
      "Iter: 3 trainloss:0.9078507900238038 validscore:0.4472549019607842  validacc:0.48  testscore:0.4220654573375564  testacc:0.375\n",
      "Iter: 4 trainloss:0.8447993993759155 validscore:0.47981132075471694  validacc:0.49  testscore:0.4420454545454545  testacc:0.4\n",
      "Iter: 5 trainloss:0.7802115082740784 validscore:0.5471220746363061  validacc:0.54  testscore:0.5052778246917751  testacc:0.47\n",
      "Iter: 6 trainloss:0.731482720375061 validscore:0.5337037037037037  validacc:0.55  testscore:0.46270846578960245  testacc:0.41\n",
      "Iter: 7 trainloss:0.6669716000556946 validscore:0.5132569558101474  validacc:0.52  testscore:0.47298850574712636  testacc:0.435\n",
      "Iter: 8 trainloss:0.6166037559509278 validscore:0.5445262805490458  validacc:0.55  testscore:0.568763280917977  testacc:0.51\n",
      "Iter: 9 trainloss:0.5598934412002563 validscore:0.5232974910394265  validacc:0.52  testscore:0.5356926799758016  testacc:0.47\n",
      "Iter: 10 trainloss:0.49218562841415403 validscore:0.5458333333333334  validacc:0.55  testscore:0.5299019607843138  testacc:0.47\n",
      "-----------------------------------------\n",
      "Iter: 1 trainloss:1.0799322605133057 validscore:0.4074074074074074  validacc:0.42  testscore:0.3640798009853178  testacc:0.475\n",
      "Iter: 2 trainloss:0.9715364694595336 validscore:0.3448509485094851  validacc:0.47  testscore:0.4614872798434443  testacc:0.435\n",
      "Iter: 3 trainloss:0.8990709781646729 validscore:0.5490196078431373  validacc:0.52  testscore:0.5047231697717134  testacc:0.49\n",
      "Iter: 4 trainloss:0.8336293935775757 validscore:0.5057971014492754  validacc:0.54  testscore:0.5214294639510184  testacc:0.465\n",
      "Iter: 5 trainloss:0.770923376083374 validscore:0.5619136960600375  validacc:0.54  testscore:0.5150186071238703  testacc:0.5\n",
      "Iter: 6 trainloss:0.730181896686554 validscore:0.524561403508772  validacc:0.55  testscore:0.5453197099538563  testacc:0.485\n",
      "Iter: 7 trainloss:0.6987830400466919 validscore:0.5355092668525504  validacc:0.53  testscore:0.5425415871284429  testacc:0.5\n",
      "Iter: 8 trainloss:0.6141099333763123 validscore:0.57  validacc:0.57  testscore:0.5177013593020896  testacc:0.455\n",
      "Iter: 9 trainloss:0.5904928565025329 validscore:0.5287723785166241  validacc:0.52  testscore:0.5700267618198037  testacc:0.52\n",
      "Iter: 10 trainloss:0.500314223766327 validscore:0.5510204081632653  validacc:0.57  testscore:0.52099173553719  testacc:0.445\n",
      "**********************************************\n"
     ]
    }
   ],
   "source": [
    "# ATAE: keep the best model out of 6 rounds (translated from the original Chinese comment).\n",
    "def data_generator(train_x0,train_x1,label,batch_size):\n",
    "    \"\"\"Shuffle the two input arrays and the labels in unison, then yield full batches.\n",
    "\n",
    "    Any trailing partial batch (when len(label) is not divisible by batch_size)\n",
    "    is dropped; use np.ceil on the batch count if it must be kept.\n",
    "    \"\"\"\n",
    "    idx = np.random.permutation(len(label))\n",
    "    shuffled_train_x0,shuffled_train_x1,shuffled_train_y = train_x0[idx],train_x1[idx],label[idx]\n",
    "    for i in range(len(label)//batch_size):\n",
    "        yield shuffled_train_x0[i * batch_size:(i + 1) * batch_size], shuffled_train_x1[i * batch_size:(i + 1) * batch_size],shuffled_train_y[i * batch_size:(i + 1) * batch_size]\n",
    "\n",
    "print('[Model] Model Compile Done.', end='\\n')\n",
    "data_list=[[train_weibo_russia,test_weibo_russia],\n",
    "          [train_weibo_shenzhen,test_weibo_shenzhen],\n",
    "          [train_weibo_chunjie,test_weibo_chunjie],\n",
    "          [train_weibo_kaifang,test_weibo_kaifang],\n",
    "          [train_weibo_phone,test_weibo_phone]]\n",
    "\n",
    "for tar in data_list:\n",
    "    train_data,test_data,time_steps,target_nums,embedding_matrix = get_train_data(tar[0],tar[1])\n",
    "    # Three independent runs per target, each with a freshly created model.\n",
    "    # (Renamed from `num`: the old inner `num = len(label_iter)` shadowed this loop variable.)\n",
    "    for run in range(3):\n",
    "        model =create_new()\n",
    "        opt = Adam(lr=0.001)\n",
    "        model.compile(optimizer=opt, loss='categorical_crossentropy')\n",
    "        print('-----------------------------------------')\n",
    "        for i_e in range(10):\n",
    "            # First 500 samples train; the remainder serve as the validation split.\n",
    "            iter_data = data_generator(train_data[0][:500],train_data[1][:500],train_data[2][:500],100)\n",
    "            loss,total=0,0\n",
    "            for x0,x1,label_iter in iter_data:\n",
    "                history = model.train_on_batch([x0,x1],label_iter)\n",
    "                # train_on_batch returns the mean batch loss; weight it by batch size\n",
    "                # so the epoch loss below is a proper sample-weighted average.\n",
    "                batch_len = len(label_iter)\n",
    "                loss,total=loss+history*batch_len,total+batch_len\n",
    "            loss=loss/total\n",
    "            v_pred = [i.argmax() for i in model.predict([train_data[0][500:],train_data[1][500:]])]\n",
    "            v_true = [i.argmax() for i in train_data[2][500:]]\n",
    "            y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "            y_true = [i.argmax() for i in test_data[2]]\n",
    "            valid_score = f1_score(v_true,v_pred)\n",
    "            valid_acc=np.mean(np.equal(v_pred,v_true),axis=-1)\n",
    "            test_score = f1_score(y_true,y_pred)\n",
    "            test_acc=np.mean(np.equal(y_pred,y_true),axis=-1)\n",
    "            print('Iter: {} trainloss:{} validscore:{}  validacc:{}  testscore:{}  testacc:{}' .format (i_e+1,loss,valid_score,valid_acc,test_score,test_acc),end='\\n')\n",
    "        del model\n",
    "    print('**********************************************')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 600 samples, validate on 200 samples\n",
      "Epoch 1/50\n",
      "600/600 [==============================] - 13s 21ms/step - loss: 1.0747 - acc: 0.4100 - val_loss: 0.9471 - val_acc: 0.5100\n",
      "Epoch 2/50\n",
      "600/600 [==============================] - 9s 16ms/step - loss: 0.9750 - acc: 0.5083 - val_loss: 0.9420 - val_acc: 0.5500\n",
      "Epoch 3/50\n",
      "600/600 [==============================] - 9s 16ms/step - loss: 0.9394 - acc: 0.5583 - val_loss: 0.9352 - val_acc: 0.5400\n",
      "Epoch 4/50\n",
      "600/600 [==============================] - 9s 16ms/step - loss: 0.8945 - acc: 0.5933 - val_loss: 0.9150 - val_acc: 0.5600\n",
      "Epoch 5/50\n",
      "600/600 [==============================] - 10s 16ms/step - loss: 0.8595 - acc: 0.6150 - val_loss: 0.9244 - val_acc: 0.5400\n",
      "Epoch 6/50\n",
      "600/600 [==============================] - 10s 16ms/step - loss: 0.7888 - acc: 0.6933 - val_loss: 0.9647 - val_acc: 0.5350\n",
      "Epoch 7/50\n",
      "600/600 [==============================] - 9s 16ms/step - loss: 0.7311 - acc: 0.6900 - val_loss: 1.0018 - val_acc: 0.5400\n",
      "Epoch 8/50\n",
      "600/600 [==============================] - 10s 16ms/step - loss: 0.6485 - acc: 0.7733 - val_loss: 1.0326 - val_acc: 0.5150\n",
      "Epoch 9/50\n",
      "600/600 [==============================] - 9s 16ms/step - loss: 0.6005 - acc: 0.7783 - val_loss: 1.1008 - val_acc: 0.5300\n",
      "=====f值：0.5722007722007723=========\n"
     ]
    }
   ],
   "source": [
    "# Train one model with early stopping, then print the F1 score on the test split.\n",
    "# (f1_score and target_sentence are defined elsewhere in this notebook.)\n",
    "early_stopping = EarlyStopping(monitor='val_loss',patience=5)\n",
    "# checkpointer = ModelCheckpoint(filepath='tmp/weights.hdf5',verbose=1,save_best_only=True)\n",
    "model = target_sentence()\n",
    "opt = Adam(lr=0.001)\n",
    "# Original note (translated): the paper's optimizer settings did not work; switched to\n",
    "# plain Adam. Paper setting was: opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n",
    "model.compile(loss='categorical_crossentropy',optimizer=opt,\n",
    "             metrics=['accuracy'])\n",
    "# NOTE(review): validation_data is the TEST split, so early stopping peeks at the\n",
    "# test set -- confirm this is intentional. nb_epoch is defined in an earlier cell.\n",
    "model.fit([train_data[0],train_data[1]],train_data[2],\n",
    "          batch_size=100,\n",
    "          epochs=nb_epoch,\n",
    "          verbose=1,\n",
    "          validation_data=([test_data[0],test_data[1]],test_data[2]),\n",
    "          callbacks=[early_stopping],\n",
    "          shuffle=True)\n",
    "# Convert one-hot / probability rows to class indices, then score.\n",
    "y_pred = [i.argmax() for i in model.predict([test_data[0],test_data[1]])]\n",
    "y_true = [i.argmax() for i in test_data[2]]\n",
    "print(\"=====f值：{}=========\".format(f1_score(y_true,y_pred)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEWCAYAAACJ0YulAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4FWX2wPHvSa8QCKGlEBDpndBEFNeGXezYFldFXdt2\n3d8W3eLqquu6u/ZeERsqKmLZBRGlI713QkkCgZBez++PGUKI6eRmcpPzeZ773Dv9zJ1kzn3fd+Yd\nUVWMMcYYgACvAzDGGNN8WFIwxhhTzpKCMcaYcpYUjDHGlLOkYIwxppwlBWOMMeUsKZhGIyKviMhf\n6zjvdhE5w4exXCMiX/hq/b4kIveLyBvu5yQRyRGRwNrmbeC21ojI+IYuX8N654jITY29XuN7QV4H\nYExlIvIKkKqqv2/oOlT1TeDNRgvKI6q6E4hqjHVV9b2qav/GWLdpOaykYPyOiNiPGWN8xJJCK+NW\n2/xaRFaKSK6IvCginUTkMxHJFpGvRKRdhfkvdKsYDrlVAn0rTBsqIsvc5d4Gwipt63wRWe4u+52I\nDKpDfFOAa4DfuNUmH1eI+x4RWQnkikiQiNwrIlvc7a8VkYkV1jNZROZVGFYRuVVENrnxPCkiUsX2\nu4pIvoi0r7Sf+0UkWER6isjXIpLljnu7mv34TETuqDRuhYhc4n7+l4jsEpHDIrJURMZVs55kN/Yg\nd7i7u/1sEfkS6FBp/ndFZJ8b31wR6V+H7/UM93OoiDwuInvc1+MiEupOGy8iqSLySxFJF5G9InJD\n1UfxB/sQICK/F5Ed7rKviUhbd1qYiLwhIgfc47JYRDq50yaLyFZ3X7eJyDV12Z45Tqpqr1b0ArYD\nC4BOQDyQDiwDhuKc1P8H3OfO2wvIBc4EgoHfAJuBEPe1A/i5O+0yoBj4q7vsUHfdo4BA4MfutkMr\nxHFGNTG+cmQ9leJeDiQC4e64y4GuOD9urnRj7eJOmwzMq7C8Ap8AMUASkAFMqGb7/wNurjD8CPCM\n+/kt4HfuNsOAk6tZx/XAtxWG+wGHKuz/tUAsThXuL4F9QJg77X7gDfdzsht7kDs8H3gMCAVOAbKP\nzOtO/wkQ7U5/HFheh+/1DPfzn92/jY5AHPAd8Bd32nigxJ0nGDgXyAPaVbP/c4CbKsS0GeiBUxU2\nHXjdnXYL8DEQ4f6dDAfaAJHAYaC3O18XoL/X/z+t4WUlhdbpP6qapqq7gW+Ahar6vaoWAB/gnNDB\nOdF+qqpfqmox8CgQDpwEjMY5OTyuqsWq+h6wuMI2pgDPqupCVS1V1VeBQne5hvq3qu5S1XwAVX1X\nVfeoapmqvg1sAkbWsPxDqnpInXr62cCQauabCkwCcEsTV7njwEl83YCuqlqgqvOqXgUfAENEpJs7\nfA0wXVUL3djfUNUDqlqiqv/AOYn3rmnnRSQJGAH8QVULVXUuzgm1nKq+pKrZ7nbuBwYf+VVeB9cA\nf1bVdFXNAP4EXFdherE7vVhVZwI5tcVcYb2PqepWVc0Bfgtc5ZZ+inGSY0/372Spqh52lysDBohI\nuKruVdU1ddwPcxwsKbROaRU+51cxfKRhsytOaQAAVS0DduGUMLoCu1W1Yo+KOyp87gb80q0SOCQi\nh3B+5Xc9jrh3VRwQkesrVE8dAgZQqTqlkn0VPudRfQPu+8AYEemC82u8DCd5glNaEmCRW632k6pW\noKrZwKc4CQWcJFPe8C0ivxKRdW41zyGgbS2xg/PdHVTV3Arjyr9zEQkUkYfcKrXDOKUA6rDeiuuv\neAx3cOzxOqCqJRWGa/oOa1tvEE5p9XXgc2CaW2X1sIgEu/t4JXArsFdEPhWRPnXcD3McLCmYmuzB\nObkD5b+aE4HdwF4gvlK9fFKFz7uAB1Q1psIrQlXfqsN2q+u6t3y8+wv8eeAOIFZVY4DVOCfs46Kq\nB4EvcE5KVwPTjiQ/Vd2nqjeralecqo
+nRKRnNat6C5gkImNwqppmu7GPw0kuV+BUv8QAWXWIfS/Q\nTkQiK4yr+J1fDVwEnIGTZJLd8UfWW1uXyMccb3fde2pZpi6qWm8JkOaWOv6kqv1wSqDn41S9oaqf\nq+qZOFVH63GOt/ExSwqmJu8A54nI6SISjFP3XYhT1zwf5x/7LrcB9hKOrbp5HrhVREaJI1JEzhOR\n6DpsNw2n/rkmkTgnuQwAt9FzQH12rhZTcU5Ol3G06ggRuVxEEtzBg24MZdWsYybOyfDPwNtuSQuc\nOv8SN/YgEfkjTj16jVR1B7AE+JOIhIjIycAFFWaJxjk+B3Dq6P9WaRW1fa9vAb8XkTgR6QD8EWjw\nPRCV1vtzt5E8yo3rbVUtEZHTRGSgOPdhHMapTioT5+KHi9wEWIhTVVXd92wakSUFUy1V3YDTIPof\nYD/OCegCVS1S1SLgEpwG3UycX9XTKyy7BLgZeALn5LnZnbcuXgT6udVCH1YT21rgHzjJKQ0YCHxb\nvz2s0QzgRGCfqq6oMH4EsFBEctx57lbVrdXEWIjznZxBhcSCU10yC9iIU5VSQKWqsRpcjdN4nwnc\nB7xWYdpr7vp2A2txGo0rqu17/StO0lkJrMK5AKFONyPW4iWcaqK5wDac/b3TndYZeA8nIawDvnbn\nDQB+gVPKyAROBW5rhFhMLeTYKmFjjDGtmZUUjDHGlLOkYIwxppwlBWOMMeUsKRhjjCnndx2LdejQ\nQZOTk70Owxhj/MrSpUv3q2pcbfP5XVJITk5myZIlXodhjDF+RUR21D6XVR8ZY4ypwJKCMcaYcpYU\njDHGlPO7NgVjTMtSXFxMamoqBQUFXofSIoSFhZGQkEBwcHCDlrekYIzxVGpqKtHR0SQnJyM/fBie\nqQdV5cCBA6SmptK9e/cGrcOqj4wxniooKCA2NtYSQiMQEWJjY4+r1GVJwRjjOUsIjed4v0tLCsYY\n45XSYsg/CM2ot2pLCsaYVu3QoUM89dRT9V7u3HPP5dChQ8e58V1wcDsUZB3fehqRJQVjTKtWXVIo\nKSmpYu6jZs6cSUxMTMM3XJQLhe5TWLNSoay04etqRJYUjDGt2r333suWLVsYMmQII0aMYNy4cVx4\n4YX069cPgIsvvpjhw4fTv39/nnvuufLlkpOT2b9/P9u3b6dv377cfPPN9O/fn7POOov8/PzaN5y9\nFyQQ2veAsmJnuBmwS1KNMc3Gnz5ew9o9hxt1nf26tuG+C/pXO/2hhx5i9erVLF++nDlz5nDeeeex\nevXq8ks6X3rpJdq3b09+fj4jRozg0ksvJTY29ph1bNq0ibfeeovnn3+eK664gvfff59rr722+qAK\nc6AwG9p0hbA2EBELuRkQ3h5CIhplvxvKSgrGGFPByJEjj7nG/9///jeDBw9m9OjR7Nq1i02bNv1g\nme7duzNkyBAAhg8fzvbt22veSPZeCAiCiA7OcJuuznDWLs8bna2kYIxpNmr6Rd9UIiMjyz/PmTOH\nr776ivnz5xMREcH48eOrvAcgNDS0/HNgYGDN1UeF2VCUA20SICDQGRcQBG3i4dAOyDsAkR0abX/q\ny0oKxphWLTo6muzs7CqnZWVl0a5dOyIiIli/fj0LFiw4vo2pwuG9EBDsVBlVFN4OQqLg8B7nUlWP\nWEnBGNOqxcbGMnbsWAYMGEB4eDidOnUqnzZhwgSeeeYZ+vbtS+/evRk9evTxbazwMBTnQttECKj0\nm1zEGZ+x3kkM7bod37YaSLQZ3TRRFykpKWoP2TGm5Vi3bh19+/b1OgzfU4X9G5xLTzv2Bammoubw\nHshJg9ieEBrdoE1V9Z2KyFJVTaltWas+MsaYplCQBcX5EN25+oQAENUZAkOcG9u0rOnic/ksKYjI\nSyKSLiKra5hnvIgsF5E1IvK1r2IxxhhPqTpXHAWGOped1iQgwKlGKi2EnPSmia/i5n247leACdVN\nFJ
EY4CngQlXtD1zuw1iMMcY7+QehpMAtJdShw7qwNhAWA9n7oKTQ9/FV4LOkoKpzgcwaZrkamK6q\nO935mz4lGmOMr6k6J/egMOcKo7pqG+8kkCa+d8HLNoVeQDsRmSMiS0Xkeg9jMcaYmmlZw36152c6\nVUHRXepWSjgiMMRZpjAbCo6z47168PKS1CBgOHA6EA7MF5EFqrqx8owiMgWYApCUlNSkQRpjDCVF\ncHAbFOc5v/bbdHVO2rXRMqeUEBwOYW3rv93IOMjLhKzdENrm6M1uPuRlSSEV+FxVc1V1PzAXGFzV\njKr6nKqmqGpKXFxckwZpjGnlCrOdS0lLCiAilqiEfpC+jj0bV3DZZZdWucj48eNZsmSJc0IvLaqy\nlPD444+Tl5dXPlxlV9wiEJPYpB3meZkUPgJOFpEgEYkARgHrPIzHGGOOUoXsNDiw2emGokNviEly\nLicNiaZrVBnvPfkX51LTqur8y46UEiKdX/mVVE4K1XbFHRLp9JGUmwFFeT+c3sh8eUnqW8B8oLeI\npIrIjSJyq4jcCqCq64BZwEpgEfCCqlZ7+aoxxvjCvffey5NPPlk+fP/99/PXv/yZ008dy7CTxjPw\njKv46Nv1EBx2dKHYHmzPDmLAqRdB5lbyd6/lqisup2/fvkycONHp+6jgEJQVc9vvHiZlxAj69+/P\nfffdBzid7O3Zs4fTTjuN0047DTjaFTfAY489xoABAxgwYACPP/44tOnC9t1p9B04uP5ddNeTz9oU\nVHVSHeZ5BHjEVzEYY/zMZ/fCvlWNu87OA+Gch6qdfOWVV/Kzn/2M22+/HYB33n6bz998gruuOI02\n8b3Zny+MHjOGCy+++NjnH4dGQ1AotInn6UcfJiKgmHULvmLltnSGpYxwqo5CuvPAg3+nfWwspaWl\nnH766axcuZK77rqLxx57jNmzZ9Ohw7Gd3y1dupSXX36ZhQsXoqqMGjWKU089lXbterBpyzbeevvd\nunfR3QB2R7MxplUbOnQo6enp7NmzhxUL5tIuKpTOce34v8ffYNBJZ3DGmWeye/du0tLSql5BVEfm\nfr+Ra6++EnLTGdQ5iEH9+4CWQnQX3nn3XYYNG8bQoUNZs2YNa9eurTGeefPmMXHiRCIjI4mKiuKS\nSy7hm2++gaCQ+nfR3QDWIZ4xpvmo4Re9L11+2WW89/oL7EvdzpUTz+fNL78nI/MQS5cuJTg4mOTk\n5Cq7zC4nARDVCTr0OvpozeAItu3J4NFHH2Xx4sW0a9eOyZMn17yeWtSri+4GspKCMaZ1Ky3iyrNG\nM+3d93nvszlcPvk2srJz6NixI8HBwcyePZsdO3bUuIpTTjmFqVOnQkgkq/cVsXLdJojuwuHDh4mM\njKRt27akpaXx2WeflS9TXZfd48aN48MPPyQvL4/c3Fw++OADxo0b1+i7XR0rKRhjWi8tg/2b6N8z\ngeyCYuITu9GlazzXXHMNF1xwAQMHDiQlJYU+ffrUuJrbbruNG264gb59+9K3b1+GDx8OgUEMHjyY\noUOH0qdPHxITExk7dmz5MlOmTGHChAl07dqV2bNnl48fNmwYkydPZuTIkQDcdNNNDB061CdVRVWx\nrrONMZ7ytOvs/EPOTWntukN4FZeD+inrOtsYYxoi74DzFLSG3G3cQllSMMa0TiVFzpPQImLr1ydR\nC2dJwRjjOU+qsfMOOO8RtTzfwM8c73dpScEY46mwsDAOHDjQtIlB1UkKR25AayFUlQMHDhAWFlb7\nzNWwq4+MMZ5KSEggNTWVjIyMpttocb7Tl1BkB0hvWV2uhYWFkZCQ0ODlLSkYYzwVHBxM9+7dm3aj\n066BnQvgF+sgqA5dYLciVn1kjGldstNg4ywYMskSQhUsKRhjWpcVU6GsBIb92OtImiVLCsaY1kMV\nlr0GSSdBhxO9jqZZsqRgjGk9ts+DzK0w3EoJ1bGkYIxpPZa9CqFt
od9FXkfSbPnyyWsviUi6iNT4\nNDURGSEiJSJyma9iMcYY8jJh7QwYdAUEh3sdTbPly5LCK8CEmmYQkUDg78AXPozDGGNg5TtQWmhV\nR7XwWVJQ1blAZi2z3Qm8D6T7Kg5jjHEamF+FrsOcx3OaannWpiAi8cBE4Ok6zDtFRJaIyJImvevR\nGNMypC6B9LUw7HqvI2n2vGxofhy4R1XLaptRVZ9T1RRVTYmLi2uC0IwxLcqyVyE4EgZa02VtvOzm\nIgWYJk6XtR2Ac0WkRFU/9DAmY0xLU5gNq6fDgEucDvBMjTxLCqpa3tmJiLwCfGIJwRjT6Fa9B8W5\ndgdzHfksKYjIW8B4oIOIpAL3AcEAqvqMr7ZrjDHHWPYadOwHCbU+idLgw6SgqpPqMe9kX8VhjGnF\n9q2CPctgwt/t6Wp1ZHc0G2NarqWvQmCoc8OaqRNLCsaYlqk437lhrd+FLe6Rm75kScEY0zKt/QgK\ns6yBuZ4sKRhjWqalr0L7HpB8steR+BVLCsaYlmf/Jtj5nXMHszUw14slBWNMy1KYDR/e5jQwD77a\n62j8jpd3NBtjTOMqzoe3JsHuZXDFaxDdyeuI/I4lBWNMy1BSBO9c7zxd7ZLnoe/5XkfklywpGGP8\nX2kJTL8ZNn0B5z8Ogy73OiK/ZW0Kxhj/VlYGM+6EtR/CWQ9Ayg1eR+TXLCkYY7y1Zzl8+FOnHaC+\nVOGz38CKqTD+t3DSHY0fXytjScEY453SEichLH8Tnj8Npl0DaWvqvvx//wSLn4cxd8Cp9/guzlbE\nkoIxxjtLXoT0NXDRU3Da72DbXHh6LLz3E+deg5rMfRTm/ROG3wBn/dXuR2gk1tBsjPFG7n6Y/QD0\nGA9DrnZO6iNugvlPwIJnYM0HMHgSnPobaJd87LILnob//QUGXQnnPWYJoRFZScEY443//gmKcuGc\nh4+e1CPaw+l/hLtXwKjbnAfk/Gc4fPJzyNrtzLPsdZh1L/Q53ylhBNhprDH57NsUkZdEJF1EVlcz\n/RoRWSkiq0TkOxEZ7KtYjDHNzO5lzsl91K0Q1/uH06PiYMLf4O7lMHyyM++/hzrVSjPuhBNOh8te\ngkCr7GhsvkyxrwATapi+DThVVQcCfwGe82EsxpjmoqwMZv4aIuNqbxxu0xXO+wfcuRQGXu5UKSWN\ngSvfgKDQpom3lfHlk9fmikhyDdO/qzC4AEjwVSzGmGZkxVTYvQQufhrC2tRtmXbd4OIn4fQ/QHh7\nCArxbYytWHMpe90IfOZ1EMaYOigtaXi1TUEWfHU/JIyAQVfVf/nozg3brqkzz1toROQ0nKRQbTlS\nRKaIyBIRWZKRkdF0wRljjvX1w/D3brDuk4YtP+ch56qjcx+xBuJmytOjIiKDgBeAi1T1QHXzqepz\nqpqiqilxcXFNF6Ax5qi1M5xLSCUQ3r7WuSy0PtLXwcJnYfiPoetQ38RojptnSUFEkoDpwHWqutGr\nOIwxdZC2Bj64FeJT4GcroM95zmWhn90LZaW1L3+kO4rQaPjRH30fr2kwn7UpiMhbwHigg4ikAvcB\nwQCq+gzwRyAWeEqca5RLVDXFV/EYYxooLxOmXe2c0K98A8LbOc8q+Px3sPBpyNrldFUdElH9OtZ+\n5NytfO6jEBnbdLGbehNV9TqGeklJSdElS5Z4HYYxrUNpCbx5Gez4FibPhMQRx05f8DTM+i3ED4dJ\n05z7CyoryoMnR0JYW5jytd1b4BERWVqXH97W0mOMqd5X98HW2c69ApUTAsDo25zSQ9oaePEM2L/5\nh/PM+6dTmjjnYUsIfqDVJIWC4lJem7+d0jL/KhkZ45mV7zj9EI24GYZdX/18fc+HyZ9AYY6TGHbM\nPzotcxt8+y/nxrPksb6P2Ry3VpMUZizfwx8/WsPklxeRmVvkdTjGNG97vne6k+h2Mkx4sPb5E1Lg\npq8gIhZeuwhWv++M//x3EBAE
Z/7Zt/GaRtNqksLlKQn8beJAFm7N5Px/f8P3Ow96HZIxzVNOuvNc\ng8g4uOJVCAyu23Ltu8ONX0L8MKePoulTYMOncOqvne4qjF9oNUlBRLh6VBLv33YSAQHCFc/O55Vv\nt+FvDe3G+FRJEbxzvXPF0VVvQmSH+i0f0R6u+xD6XwIr34b2J8Don/omVuMTrSYpHDEwoS2f3Hky\np5wYx/0fr+WuacvJLSzxOixjmodZ98LO+XDRE9ClgR0XB4fBpS/C+Y/Dla9bx3V+ptUlBYCYiBCe\nvz6FX5/dm09X7uHCJ+axKS3b67BMa7bpS3jhTNi12LsYlr7iPAntpLtg4GXHt66AAEi5ATr1b5TQ\nTNNplUkBICBAuP20nrxx0yiy8ou56Mlv+Wj5bq/DMq1RTjp8cAukLoKXJ8D8p5w7gJvSzoXw6a+c\n5xSccX/Tbts0K60nKezfBK+cD3tXHDP6pBM68Old4+jftQ13T1vOHz5cTWFJHW7bN6YxqMKMu5zL\nOX/yBZx4Fnz+W6devyDLd9vNSYc1HzrdVDx7ipOMYhLhshchINB32zXNXuu5k+TQTqdDrufGQ8pP\n4Ee/d27XBzq1CWPqzaN55PMNPDd3KytTD/HI5YPp1Sna25hNy7f8Tdj4GZz9N0gaBYlT4bv/ON1L\np62Gy1+FLoOObxuqkLnVaSvYOd+5jyBzizMtKNy5nPSUX8PQ68r/J0zr1bq6ucg/BLP/Boufd/74\nz/gTDLnmmC58Z63ex6/fXUF2YQk9O0ZxVr9OnNW/M4Pi2xIQYA8HN43o4A54eqzToPvjj4/tSnrH\nfHjvBucqoHMfcW4eq8/D6QtzYMNMWP+pkwhy0pzx4e2cJ5cljYFuJ0HnQfbAmlairt1ctK6kcMTe\nlc7jAHctcHp9PO/RY7ryTT9cwGer9/HF2n0s2JpJaZnSuU0YZ/brxFn9OzGqeywhQa2n5s34QFkZ\nvHqBU51527fOk8Uqy8mA6TfB1jnOA2nOfwxCIqtfZ0kRbPkfrHoH1s+EknyI7grJJ0O3MZB0EnTo\nZc8xaKUsKdRGFVZMgy//CLkZzpUSP/qDc511BYfyivjf+nS+WJPG1xszyC8uJTosiAm9opjYNYth\n7QoI6zEWojsdf0ym9Zj/JHz+f3DRkzD02urnKyuFuY84D6eJ6+P0ThrXq8L0MufHzap3necX5x90\nSgP9J8LAKyBxlCUBA1hSqLuCLJj9ICx6zunF8Yz7YOj1R/+RVCF7L+xbRfGelWRuXkpA+ipii3YT\nwNHvLrNtf8L6n0dE/3Ogy5Dj+0csKXTeJdBp9KtPtUFzUpTnnLC2z3P2Kaqjc5dsZEenN81I91Xb\nHbOqUJzvHKvCw1BwGAqzoGM//7xTNn2907jb83S4amrdju+W/8H7N0FxAVz4bydBrHrX6U4iaxcE\nR0Dvc50+hk74kVUJmR+wpFBf+1bDzF859a9dhzn1rftWOY19eRUeCtcuGToPpLTjADYHdGfe3gBK\nNs8mpWgRQ2UzAaLkhXRATzyTyP7nwgmnOf3QVyX/oHOCyHBf6euc9yP1v+XETQ6BFd4DnPegMKcf\n+5BICI6s/nNwGJSVOF0hlxW7nyu+Fzu/SkuLnRN1xz4Q1xfietf94eolRc4D2bfNdV67FjnrDQiC\ngGCnOqMq4e2OJouwtlCU7Zz4y5NAlhNnZQFBzp2zY26HrkPqFqPXSoqcTuOyUuGnC5xEWVeH98C7\nNziJFpzjf8KPYNAVTkIIjfJNzKZFsKTQEKpOz5Bf/sFplO7UDzoNcBrjOg90bsSp4gSpqqzZc5hv\nlq8je/Us+ubM59SAlbSRPEoliML4MYT3PwcJjoCMDZSmr0XT1hOUd/TkXyBh7AxIZF1pVzYVdyQs\nJIi2YQG0DQ2kTagQHRpAdIgQFRJAVIgQESwEUQYlBc4v8qJcKHbfK38uLawUsTi/zgOCnRNroH
vS\nDgx2kk522rEn8Dbxzi/Tjm6SOJIsQiJh7/KjSWDnAme7iHOS7n6K80oc7cxblONU1eVkQG66c1lk\n7v4KnzOcZBAa7XzPYW0h1H0Pa1Phc1sIDnfqzZe95iSRbic7yaHXhOZdXfK/B2Duw053030vqP/y\npcWw2L1stP/E+ndDYVotz5OCiLwEnA+kq+qAKqYL8C/gXCAPmKyqy2pbb5M8ZKes1EkQDez7fdv+\nXL5ctYudK+aQuH8uPwpYzokBzo1x+YSysSyeTZrAprJ4NmoC2yWRgJhE4mOjSGwXTmxUKPtzCtl7\nKJ+9WQXszSogK7/4B9vpEBXCoIQYJvTvzJn9OtEuspoqg9ISJ3kEBB098de2/4d2Hlt6yVgPGRuP\nTRZB4UeH4/oeTQLJY5vu0saCLCcxLHgGDqc6fe2M+SkMnlRzo6wXUpfAi2c5v+wnPuN1NKaVaQ5J\n4RQgB3itmqRwLnAnTlIYBfxLVUfVtl5/e/Ja2uECvlibxrKVKykuLiE8LpmE9lEktg8nsX0Eie0i\n6BgdWuvlrnlFJezNKmBfVgF73GSx+2A+8zbvZ/ehfAIDhNE92jNhQBfO7t+JjtFhjb8zZaVwaMfR\nKq+cdOca9+6n1K8axBdKS2DdR/DdE7BnmZOUUn7iPAugTZcfzl9W6pQG8zOdarw8971NF0gYWfOj\nJRuiKA+eHee0Cfz0O6e0Y0wT8jwpuEEkA59UkxSeBeao6lvu8AZgvKrurWmd/pYUfE1VWb37MLPW\n7OWz1fvYmpGLCKR0a8eEAV2YMKAz8THhVS6bX1TK3qyjpZG9h/LZe7iAiOBAunWIJDk2guTYSLrG\nhBPoL/doqDrVWPOfcK7RDwiCnmeAlron/kznvSALqOZvPyDY6f6521jnlTSq+nahupr5G1j0LFw/\nA3qcenzrMqYB/CEpfAI8pKrz3OH/Aveo6g/O+CIyBZgCkJSUNHzHjh0+i9mfqSqb0nP4bNU+Plu9\nl/X7nE7+Bie05aSeHTicX3w0AWTlcyjvh1VS7SNDyCsqoaC4rHxccKCQ2N5JEN1ij7736BBFQrvw\n5ntTX+ZWp1pp85cQEuVcbhzevvr38Bg4sAV2zIPt3zoPmtFSp0G3y2CnWqzbyZA02pm3rrbMhtcv\nhlG3wTkP+W5/jalBi0oKFVlJoe627c9l1up9zFq9lxWpWbSPDKFzmzC6xoTRuW0YXdqG06XCe+e2\nYYQFB6KqpGcXsm1/LjsO5LL9QJ7zvj+P7QdyySs62jdUeHAgJ3aKolenaPp0jqZXp2h6d46mY3Qo\n4q+X0h5dYlQ7AAAadUlEQVRRmON0Urf9W+fB9buXQmkRIE6je0w3aBvvNMS3TXBebeKdy2SPXGab\nfwiePslp37hlrtNAbowH/CEpWPVREyot00apAlJVMnIK2XEgj60ZOWxMy2HDvmw2pGWTkX30KqeY\niGAnQXSKplfnaLq0CSM6LIiosCCiQ4PLPwcHNuMrhSorzncai3d857RbZKU6r4JDlWYUiOrkJIyS\nQqex/qYvIX64J2EbA3VPCl52iDcDuENEpuE0NGfVlhBMwzVWm4CI0DE6jI7RYYxIPvbu78zcIjbs\ny2ZjWjbr3fcPv99Ndg0PMQoLDiAqNJg2bpKIjQxhdI9YTu0dR+9O0c2rtBEcDt3HOa+KCnPg8G4n\nQRzeDVlHPqc6Dczn/N0SgvEbvrz66C1gPNABSAPuA4IBVPUZ95LUJ4AJOJek3lBb1RFYScHfqCp7\nswrYn1NIdkGJ+yomp7CEnIISsguPHbf7YD6b0nMA6NwmjFN7xXFq7zjG9uxA2/A6PivYGPMDzaL6\nyBcsKbR8e7Pymbsxg683ZvDNpv1kF5QQGCAMTYwpTxIDulqvtcbUR6MmBRG5G3gZyAZeAIYC96rq\nF8cbaH1ZUmhdSkrLWL7rEF+7SWJlqvPgmfaRIZzRtyMXDO
7KmB6xBPlT24QxHmjspLBCVQeLyNnA\nLcAfgNdVddjxh1o/lhRat/05hczbtJ85G9L5al06OYUldIgK4byBXbhwSFeGJbVrXu0QxjQTjd3Q\nfOS/7FycZLBG7D/PeKBDVCgXD43n4qHxFBSXMmdDOjNW7GHa4l28On8H8THhnD+4CxcO7kq/Lm0s\nQRhTT3UtKbwMxAPdgcFAIM7lpE1+SYWVFExVsguK+XJtGh+v2MM3m/ZTUqacEBfJhYPjuXBIV7p3\naGb9IBnTxBq7+igAGAJsVdVDItIeSFDVlccfav1YUjC1ycwt4rPVe5mxfA+LtmeiCiOT23PVyETO\nHdiFsGB7ML1pfRo7KYwFlqtqrohcCwzD6cCuyfubsKRg6mNfVgEfLt/NtEU72X4gjzZhQUwcGs9V\nI5Po26WOz4kwpgVo7KSwEqfaaBDwCs4VSFeoapP37GVJwTSEqjJ/6wGmLdrFrNX7KCotY0hiDJNG\nJnL+oK5Ehnp5H6cxvtfYSWGZqg4TkT8Cu1X1xSPjGiPY+rCkYI7Xwdwipn/vlB42pecQGRLIhUPi\nmTQykYHxba1x2rRIjZ0UvgZmAT8BxgHpwApVHXi8gdaXJQXTWFSVZTsPMnXhLj5dtYeC4jKCA4WI\nkCAiQwIJDwkkMjSIiJBAIkOCnOGQICJCA+nSNoxrRnWzEobxG42dFDoDVwOLVfUbEUnC6bzuteMP\ntX4sKRhfyMovZuaqvezMzCO/qJTcwhLyikrJLXLe84pKyCs8OpxdUEJ8TDgPXjKQU3rFeR2+MbVq\n9G4uRKQTMMIdXKSq6ccRX4NZUjDNwdIdmfzmvZVsycjl0mEJ/OH8vsREVPM4VGOagbomhTr1DSAi\nVwCLgMuBK4CFInLZ8YVojP8a3q09M+8ex10/6slHy3dzxmNf8+nKvfhbX2LGVFbnbi6AM4+UDkQk\nDvhKVQf7OL4fsJKCaW7W7T3MPe+vZGVqFmf268RfLx5ApzY+eEa2McehUUsKQECl6qID9VjWmBat\nb5c2TL/tJH53bl++2ZTBGY99zVuLdlqpwfilup7YZ4nI5yIyWUQmA58CM30XljH+JSgwgJtP6cHn\nPzuFAV3b8tvpq7j6+YVs35/rdWjG1Et9GpovBca6g9+o6gc+i6oGVn1kmjtV5e3Fu3hg5jqKSsr4\n/fn9uHZUkt3/YDzV2NVHqOr7qvoL91WnhCAiE0Rkg4hsFpF7q5jeVkQ+FpEVIrJGRG6oazzGNFci\nwlUjk/jqF6cy5oRY/vDhan734WqKSsq8Ds2YWtWYFEQkW0QOV/HKFpHDtSwbCDwJnAP0AyaJSL9K\ns90OrHUbrMcD/xARu67PtAid2oTx4o9HcNv4E5i6cCfXvbiQzNwir8MypkY1JgVVjVbVNlW8olW1\ntt7ERgKbVXWrqhYB04CLKm8CiHafzRAFZALVP+XdGD8TGCDcM6EPj185hO93HeKiJ+exYV+212EZ\nUy1fXkEUD+yqMJzqjqvoCaAvsAdYBdytqj8oY4vIFBFZIiJLMjIyfBWvMT5z8dB43rllDIXFZVzy\n1Ld8uTbN65CMqZLXl5WeDSwHuuI8r+EJEflBCURVn1PVFFVNiYuzLgWMfxqSGMOMO07mhI5RTHl9\nCU/O3myXrZpmx5dJYTeQWGE4wR1X0Q3AdHVsBrYBfXwYkzGe6tw2jHduGcMFg7ryyOcb+Nnbyyko\nLvU6LGPK+TIpLAZOFJHubuPxVcCMSvPsBE6H8r6VegNbfRiTMZ4LCw7kX1cN4ddn92bGij1c+ex8\n9mUVeB2WMYAPk4KqlgB3AJ8D64B3VHWNiNwqIre6s/0FOElEVgH/Be5R1f2+ismY5kJEuP20njx3\nXQqb03O48Il5LN91yOuwjKn7zWvNhd28Zlqa9fsOc9OrS0g7XMC1o7tx549OpH2kXZltGlej37xm\njPGNPp3bMOOOk7l0WA
KvfredUx+ezRP/20RekV2dbZqeJQVjmoH2kSE8dOkgvvj5KYw5IZZHv9jI\n+EfmMHXhTkpK7U5o03QsKRjTjPTsGM1z16fw/m1jSGofwf99sIqz/jmXWavtWQ2maVhSMKYZGt6t\nPe/eOobnr08hIEC49Y1lTHzqOxZuPeB1aKaFs6RgTDMlIpzZrxOz7h7Hw5cOYl9WAVc+t4AbX1nM\n1owcr8MzLZQlBWOauaDAAK4YkcjsX43nngl9WLQ9kwuf+JbP1+zzOjTTAllSMMZPhIcEctv4E/j8\nZ6dwQlwkt7y+lEc+X09pmbU1mMZjScEYP9M1Jpy3bxnDpJGJPDl7C5NfXsRB65LbNBJLCsb4obDg\nQB68ZBAPXjKQhVszueCJeazeneV1WKYFsKRgjB+bNDKJd24dQ2mZcunT3zF9WarXIRk/Z0nBGD83\nJDGGj+88maFJMfzinRXc95E9+tM0nCUFY1qADlGhvHHjKG4e151X5+/g6ucXkH7Yel419WdJwZgW\nIigwgN+d14//TBrKmj2HOe8/81iyPdPrsIyfsaRgTAtzweCufHj7WCJDApn0/ALmbrRH2Jq6s6Rg\nTAvUu3M0H91+Mj07RnPL60tZtvOg1yEZP+HTpCAiE0Rkg4hsFpF7q5lnvIgsF5E1IvK1L+MxpjVp\nGxHMaz8ZSac2odzw8mI27Mv2OiTjB3yWFEQkEHgSOAfoB0wSkX6V5okBngIuVNX+wOW+iseY1igu\nOpTXbxxFWHAA1724kF2ZeV6HZJo5X5YURgKbVXWrqhYB04CLKs1zNTBdVXcCqGq6D+MxplVKbB/B\n6zeOoqi0jGtfXEh6tl2VZKrny6QQD+yqMJzqjquoF9BOROaIyFIRub6qFYnIFBFZIiJLMjKs0cyY\n+urVKZqXJ48gI7uQ619cRFZ+sdchmWbK64bmIGA4cB5wNvAHEelVeSZVfU5VU1Q1JS4urqljNKZF\nGJrUjmevG86WjBxufGUx+UWlXodkmiFfJoXdQGKF4QR3XEWpwOeqmquq+4G5wGAfxmRMqzbuxDj+\nddVQlu08yG1vLrU7n80P+DIpLAZOFJHuIhICXAXMqDTPR8DJIhIkIhHAKGCdD2MyptU7d2AXHpg4\nkDkbMvjluyus621zjCBfrVhVS0TkDuBzIBB4SVXXiMit7vRnVHWdiMwCVgJlwAuqutpXMRljHJNG\nJpGVX8xDn62nbXgQf7loACLidVimGfBZUgBQ1ZnAzErjnqk0/AjwiC/jMMb80K2nnsDBvCKe/Xor\n7SJC+OVZvb0OyTQDPk0Kxpjm7d4JfcjKK+Y//9tMXHQo149J9jok4zFLCsa0YiLCAxMHsj+nkD9/\nvJZ+XdqQktze67CMh7y+JNUY47HAAOEfVwwhoV04P31zmd3c1spZUjDG0DY8mKevHc7hgmLumPo9\nxaV2qWprZUnBGANA3y5tePCSgSzalsnDs9Z7HY7xiCUFY0y5iUMTuH5MN57/ZhszV+31OhzjAUsK\nxphj/P68fgxNiuHX765gc7p1t93aWFIwxhwjJCiAp64ZRlhwILe+sYzcwhKvQzJNyJKCMeYHurQN\n5z+ThrI1I4ffvL8SVesKo7WwpGCMqdJJPTvwmwl9+HTlXl6ct83rcEwTsaRgjKnWLaf04Oz+nXjw\ns/Us2pbpdTimCVhSMMZUS0R45PLBdGsfwe1Tl5F+2G5sa+ksKRhjatQmLJhnrhtOTkEJt09dZje2\ntXCWFIwxterVKZqHLh3I4u0HuW/GGrZk5FBiyaFFsg7xjDF1ctGQeJbvOsTL325n6sKdBAcKybGR\n9OwYxQlxUfTs6Lx6xEUSEWKnFn9lR84YU2d/PL8fE4fGszEth83pOWzJyGHDvmy+WJt2zBPc4mPC\n6dkxitvGn8DoHrEeRmzqy6dJQUQmAP/CefLaC6r6UDXzjQDmA1ep6nu+jMkY03AiwqCE
GAYlxBwz\nvrCklB0H8pxEkZ7D5owcFm3L5JbXlzLz7nHEx4R7FLGpL58lBREJBJ4EzgRSgcUiMkNV11Yx39+B\nL3wVizHGt0KDAunVKZpenaLLx+04kMt5/57HXW99z7QpowkOtCZMf+DLozQS2KyqW1W1CJgGXFTF\nfHcC7wPpPozFGNPEusVG8rdLBrJ0x0H++eVGr8MxdeTLpBAP7KownOqOKyci8cBE4OmaViQiU0Rk\niYgsycjIaPRAjTG+ceHgrlw1IpGnv97CN5vsf9cfeF2eexy4R1VrvLZNVZ9T1RRVTYmLi2ui0Iwx\njeG+C/pzYscofv72cnuqmx/wZVLYDSRWGE5wx1WUAkwTke3AZcBTInKxD2MyxjSx8JBAnrh6GDmF\nJfzi7RWUlVnnes2ZL5PCYuBEEekuIiHAVcCMijOoandVTVbVZOA94Keq+qEPYzLGeKBXp2juv6A/\n8zbv5+mvt3gdjqmBz5KCqpYAdwCfA+uAd1R1jYjcKiK3+mq7xpjm6coRiVwwuCuPfbmRJdutc73m\nSvytn/SUlBRdsmSJ12EYYxogu6CY8/49j5LSMmbePY6YiBCvQ2o1RGSpqqbUNp/XDc3GmFYkOiyY\nJ64eSkZOIb961x7e0xxZUjDGNKlBCTHcM6EPX61L45XvtnsdjqnEkoIxpsndeHJ3Tu/TkQdnrmf1\n7iyvwzEVWFIwxjS5Iw/vaR8Zwh1Tl5FTWOJ1SMZlScEY44n2kSH8e9JQdmbmcc97K8m1xNAsWFIw\nxnhmZPf2/PKs3ny6ai+jH/wvD3y6ll2ZeV6H1arZJanGGM8t23mQl7/dzsxVe1FVzurXmZ+c3J0R\nye0QEa/DaxHqekmqJQVjTLOx51A+ry/YwdSFO8nKL2ZAfBt+MrY75w3qQmhQoNfh+TVLCsYYv5Vf\nVMr071N5+dvtbE7PIS46lGtHdeOa0Ul0iAr1Ojy/ZEnBGOP3VJVvNu3npW+3MWdDBiFBAUw+KZlf\nnNmLsGArOdRHXZOCPaPZGNNsiQin9IrjlF5xbMnI4Zk5W3hu7la+XJvG3y8dxMju7b0OscWxq4+M\nMX7hhLgoHrl8MFNvGkVJWRlXPDuf+z5abZeyNjJLCsYYv3JSzw7MuvsUJp+UzGsLdnD243P5dvN+\nr8NqMSwpGGP8TmRoEPdf2J93bhlDSGAA17ywkN9OX8nhgmKvQ/N7lhSMMX5rRHJ7Zt49jltO7cHb\ni3dx9j/nMntDutdh+TWfJgURmSAiG0Rks4jcW8X0a0RkpYisEpHvRGSwL+MxxrQ8YcGB/Pacvkz/\n6Viiw4K44eXF/OKd5RzKK/I6NL/ks6QgIoHAk8A5QD9gkoj0qzTbNuBUVR0I/AV4zlfxGGNatiGJ\nMXx858nc9aOezFi+hx/942senrXeus2oJ1+WFEYCm1V1q6oWAdOAiyrOoKrfqepBd3ABkODDeIwx\nLVxoUCC/OKs3H90xlmFJ7Xjm6y2c8shsbnh5Ef9dl0ZpmX/dl+UFX96nEA/sqjCcCoyqYf4bgc+q\nmiAiU4ApAElJSY0VnzGmherftS0v/DiF3YfymbZoJ9MW7+LGV5cQHxPO1aOSuCIlkbhouzO6Kj67\no1lELgMmqOpN7vB1wChVvaOKeU8DngJOVtUDNa3X7mg2xtRXcWkZX65N440FO/huywGCA4Wz+3fm\nmlHdGN2jfavodK853NG8G0isMJzgjjuGiAwCXgDOqS0hGGNMQwQHBnDuwC6cO7ALWzJyeHPBTt5b\nuotPVu6lZ8corh6ZxCXD4omJCPE6VM/5sqQQBGwETsdJBouBq1V1TYV5koD/Ader6nd1Wa+VFIwx\njSG/qJRPVu7hjYU7WbHrECFBAZw7oDOTRiYxsnvLKz00iw7xRORc4HEgEHhJVR8QkVsBVPUZEXkB\nuBTY4S5SUlvQlhSMMY1t7Z7DTFu8kw+W7Sa7sIQe
cZFMGpHEpcMTaB/ZMkoPzSIp+IIlBWOMr+QX\nlfLpqr28tWgnS3ccJCQwgLMHdGbSiERG94glIKBupYfSMiWwjvM2FUsKxhhzHDamZfPWop1MX7ab\nrPxikmMjOLt/Z0rLlNyiErILSsgtLCG3sJScwhJyCp3hnMISCkvKOLlnB+67oB8ndor2elcASwrG\nGNMoCopLmbV6H1MX7WTx9kzCgwOJDA0iyn1FhgZW+Oy8A7y1aCd5RaX8+KRk7j7jRNqEBXu6H5YU\njDGmkalqnRugD+QU8ugXG5i2eBexkaHce04fLhkaX+cqqMZW16RgHeIZY0wd1eeKpNioUB68ZBAf\n3T6WxPbh/OrdFVz6zHesSs3yYYTHz5KCMcb40KCEGN6/9SQevXwwuzLzufDJefx2+koO5BTWeR2q\nysHcIg7m+r6TP3scpzHG+FhAgHDZ8ATO6t+Jf321iVe/286nK/fyy7N6c82oJESE9OwCdh/MZ/eh\nfFLd990H89lzyPmcV1TK7aedwK/P7uPTWK1NwRhjmtimtGzu/3gN324+QExEMDkFJZRU6qyvXUQw\n8e3CiY8JJz4mgvh24aR0a8fgxJgGbbM5dHNhjDGmCid2iuaNG0cxa/U+/rs+nY7RoRUSQDhdY8KJ\nDPXm9GxJwRhjPCAinDOwC+cM7OJ1KMewhmZjjDHlLCkYY4wpZ0nBGGNMOUsKxhhjyllSMMYYU86S\ngjHGmHKWFIwxxpSzpGCMMaac33VzISIZHH18Z311APY3YjjNQUvbp5a2P9Dy9qml7Q+0vH2qan+6\nqWpcbQv6XVI4HiKypC59f/iTlrZPLW1/oOXtU0vbH2h5+3Q8+2PVR8YYY8pZUjDGGFOutSWF57wO\nwAda2j61tP2BlrdPLW1/oOXtU4P3p1W1KRhjjKlZayspGGOMqYElBWOMMeVaTVIQkQkiskFENovI\nvV7H0xhEZLuIrBKR5SLid88oFZGXRCRdRFZXGNdeRL4UkU3uezsvY6yvavbpfhHZ7R6n5SJyrpcx\n1oeIJIrIbBFZKyJrRORud7xfHqca9sefj1GYiCwSkRXuPv3JHd+gY9Qq2hREJBDYCJwJpAKLgUmq\nutbTwI6TiGwHUlTVL2+6EZFTgBzgNVUd4I57GMhU1Yfc5N1OVe/xMs76qGaf7gdyVPVRL2NrCBHp\nAnRR1WUiEg0sBS4GJuOHx6mG/bkC/z1GAkSqao6IBAPzgLuBS2jAMWotJYWRwGZV3aqqRcA04CKP\nY2r1VHUukFlp9EXAq+7nV3H+Yf1GNfvkt1R1r6oucz9nA+uAePz0ONWwP35LHTnuYLD7Uhp4jFpL\nUogHdlUYTsXP/xBcCnwlIktFZIrXwTSSTqq61/28D+jkZTCN6E4RWelWL/lFVUtlIpIMDAUW0gKO\nU6X9AT8+RiISKCLLgXTgS1Vt8DFqLUmhpTpZVYcA5wC3u1UXLYY6dZstoX7zaaAHMATYC/zD23Dq\nT0SigPeBn6nq4YrT/PE4VbE/fn2MVLXUPRckACNFZECl6XU+Rq0lKewGEisMJ7jj/Jqq7nbf04EP\ncKrJ/F2aW+97pP433eN4jpuqprn/tGXA8/jZcXLrqd8H3lTV6e5ovz1OVe2Pvx+jI1T1EDAbmEAD\nj1FrSQqLgRNFpLuIhABXATM8jum4iEik21CGiEQCZwGra17KL8wAfux+/jHwkYexNIoj/5iuifjR\ncXIbMV8E1qnqYxUm+eVxqm5//PwYxYlIjPs5HOeCmvU08Bi1iquPANxLzB4HAoGXVPUBj0M6LiLS\nA6d0ABAETPW3fRKRt4DxON38pgH3AR8C7wBJOF2kX6GqftNwW80+jcepllBgO3BLhbreZk1ETga+\nAVYBZe7o/8Oph/e741TD/kzCf4/RIJyG5ECcH/rvqOqfRSSWBhyjVpMUjDHG1K61VB8ZY4ypA0sK\nxhhjyllSMMYY
U86SgjHGmHKWFIwxxpSzpGBMExKR8SLyiddxGFMdSwrGGGPKWVIwpgoicq3bR/1y\nEXnW7XAsR0T+6fZZ/18RiXPnHSIiC9zO1D440pmaiPQUka/cfu6XicgJ7uqjROQ9EVkvIm+6d9ka\n0yxYUjCmEhHpC1wJjHU7GSsFrgEigSWq2h/4GuduZYDXgHtUdRDOnbJHxr8JPKmqg4GTcDpaA6dn\nzp8B/XA6YRvr850ypo6CvA7AmGbodGA4sNj9ER+O05lYGfC2O88bwHQRaQvEqOrX7vhXgXfdfqni\nVfUDAFUtAHDXt0hVU93h5UAyzoNRjPGcJQVjfkiAV1X1t8eMFPlDpfka2kdMYYXPpdj/oWlGrPrI\nmB/6L3CZiHSE8mfddsP5f7nMnedqYJ6qZgEHRWScO/464Gv3qV6pInKxu45QEYlo0r0wpgHsF4ox\nlajqWhH5PfCFiAQAxcDtQC7OA0x+j1OddKW7yI+BZ9yT/lbgBnf8dcCzIvJndx2XN+FuGNMg1kuq\nMXUkIjmqGuV1HMb4klUfGWOMKWclBWOMMeWspGCMMaacJQVjjDHlLCkYY4wpZ0nBGGNMOUsKxhhj\nyv0/0vkIQ8WGBdsAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x1b2a2871eb8>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 画图\n",
    "from matplotlib import pyplot\n",
    "pyplot.plot(hist.history['loss'])\n",
    "pyplot.plot(hist.history['val_loss'])\n",
    "pyplot.title('model train vs validation loss')\n",
    "pyplot.ylabel('loss')\n",
    "pyplot.xlabel('epoch')\n",
    "pyplot.legend(['train', 'validation'], loc='upper right')\n",
    "pyplot.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "5"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def train_and_evaluate_model(model,x_train,y_train,x_val,y_val):\n",
    "    early_stopping = EarlyStopping(monitor='val_loss',patience=15)\n",
    "    hist = model.fit(x_train,y_train,\n",
    "                  batch_size=batch_size,\n",
    "                  epochs=nb_epoch,verbose=1,\n",
    "                  callbacks = [early_stopping],\n",
    "                  validation_data=(x_val,y_val),shuffle=True)\n",
    "    return hist\n",
    "# 交叉验证\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "skf = StratifiedKFold(n_splits=5)\n",
    "skf.get_n_splits(data,labels)\n",
    "x = np.zeros((600,1))\n",
    "y = np.zeros(600)\n",
    "hists=[]\n",
    "for train_index,test_index in skf.split(x,y):\n",
    "    model = None\n",
    "    model = create_model()\n",
    "    hists.append(train_and_evaluate_model(model,data[train_index],labels[train_index],data[test_index],labels[test_index]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
