{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Import"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-21T06:21:58.364788Z",
     "start_time": "2019-05-21T06:21:55.909160Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import copy\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n",
    "from tensorflow.python.layers import core as core_layers\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "from tensorflow.python.ops import array_ops\n",
    "import time\n",
    "import jieba\n",
    "\n",
    "from Util import mybleu\n",
    "from Util import myResidualCell\n",
    "import random\n",
    "import pickle\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def idx2str(s):\n",
    "    # Decode a list of vocabulary ids into a space-joined sentence.\n",
    "    words = [id2w[token_id] for token_id in s]\n",
    "    return ' '.join(words)\n",
    "\n",
    "def str2idx(idx):\n",
    "    # Encode a whitespace-separated sentence as a list of vocabulary ids.\n",
    "    tokens = idx.strip().split()\n",
    "    return [w2id[tok] for tok in tokens]\n",
    "\n",
    "def pad(x, pid, move_go=False):\n",
    "    # Pad every sequence in x with pid up to the longest sequence length.\n",
    "    # move_go=True drops each sequence's leading <GO> token first.\n",
    "    if move_go:\n",
    "        seqs = [k[1:] for k in x]\n",
    "    else:\n",
    "        seqs = list(x)\n",
    "    length_list = [len(k) for k in seqs]\n",
    "    max_length = max(length_list)\n",
    "    pad_x = [k + [pid] * (max_length - len(k)) for k in seqs]\n",
    "    return pad_x, length_list\n",
    "\n",
    "def pad_maxlength(x, pid, move_go=False):\n",
    "    # Pad/truncate every sequence in x to a fixed width of 16 tokens.\n",
    "    # move_go=True drops each sequence's leading <GO> token first.\n",
    "    max_length = 16\n",
    "    if move_go:\n",
    "        seqs = [k[1:] for k in x]\n",
    "    else:\n",
    "        seqs = list(x)\n",
    "    # Truncate before padding, and clamp the reported lengths to the padded\n",
    "    # width. (Bug fix: the original reported lengths > 16 for truncated\n",
    "    # sequences, which exceeds the time dimension of the padded batch and\n",
    "    # breaks downstream sequence-length tensors; the move_go branch never\n",
    "    # truncated at all.)\n",
    "    seqs = [k[:max_length] for k in seqs]\n",
    "    length_list = [len(k) for k in seqs]\n",
    "    pad_x = [k + [pid] * (max_length - len(k)) for k in seqs]\n",
    "    return pad_x, length_list\n",
    "\n",
    "import nltk\n",
    "def word_overlap_edit(s1, s2):\n",
    "    # Jaccard word overlap and normalized edit-distance similarity between\n",
    "    # two whitespace-tokenized sentences; both scores lie in [0, 1].\n",
    "    t1 = set(s1.split())\n",
    "    t2 = set(s2.split())\n",
    "    if not (t1 | t2):\n",
    "        # Both sentences are empty: treat them as identical instead of\n",
    "        # raising ZeroDivisionError on the empty union / zero max length.\n",
    "        return 1.0, 1.0\n",
    "    word_overlap = float(len(t1 & t2)) / len(t1 | t2)\n",
    "    edit_distance = 1 - float(nltk.edit_distance(s1.split(), s2.split())) / max(len(s1.split()), len(s2.split()))\n",
    "    return word_overlap, edit_distance\n",
    "tf.logging.set_verbosity(tf.logging.INFO)\n",
    "sess_conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-21T06:22:02.425494Z",
     "start_time": "2019-05-21T06:22:02.275076Z"
    }
   },
   "outputs": [],
   "source": [
    "w2id, id2w = pickle.load(open('/workspace/Data/yelp/w2id_id2w.pkl','rb'))\n",
    "original, reference, original_noun, reference_noun = pickle.load(open('/workspace/Data/yelp/original_reference_and_noun.pkl','rb'))\n",
    "C_original = [[1, 0] for i in range(500)] + [[0, 1] for i in range(500)]\n",
    "assert len(original) == 1000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-21T06:22:02.535267Z",
     "start_time": "2019-05-21T06:22:02.531835Z"
    }
   },
   "outputs": [],
   "source": [
    "ppl_upper = 10000000"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Evaluation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Init"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-21T06:22:13.113989Z",
     "start_time": "2019-05-21T06:22:04.338998Z"
    }
   },
   "outputs": [],
   "source": [
    "from textBiLSTM import *\n",
    "\n",
    "BATCH_SIZE = 256\n",
    "NUM_EPOCH = 30\n",
    "train_dir ='Model/YELP/TextBiLSTM-all/'\n",
    "#MAX_LENGTH = 16\n",
    "import pickle\n",
    "w2id_all, id2w_all, X_indices_all, C_labels_all = pickle.load(open('/workspace/Data/yelp/w2id_id2w_indices_labels_all.pkl','rb'))\n",
    "\n",
    "\n",
    "\n",
    "bilstm_dp = BiLSTM_DP(X_indices_all, C_labels_all, w2id_all,  BATCH_SIZE, n_epoch=NUM_EPOCH, test_data=None)\n",
    "\n",
    "g_bilstm = tf.Graph()\n",
    "sess_bilstm = tf.Session(graph=g_bilstm, config=sess_conf) \n",
    "with sess_bilstm.as_default():\n",
    "    with sess_bilstm.graph.as_default():\n",
    "        B = BiLSTM(\n",
    "            dp = bilstm_dp,\n",
    "            rnn_size = 512,\n",
    "            n_layers = 1,\n",
    "            encoder_embedding_dim = 256,\n",
    "            cell_type = 'lstm',\n",
    "            num_classes = 2,\n",
    "            sess=sess_bilstm\n",
    "        )\n",
    "#B.restore('Model/YELP/TextBiLSTM-all/model-11')\n",
    "B.restore('Model/YELP/TextBiLSTM-appendix/model-7')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-21T06:22:13.126965Z",
     "start_time": "2019-05-21T06:22:13.117166Z"
    }
   },
   "outputs": [],
   "source": [
    "import kenlm\n",
    "lm = kenlm.Model('/workspace/Moses/YELP_lm/yelp.blm')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:16:25.132996Z",
     "start_time": "2019-05-17T06:16:25.119680Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "file_name_list = []\n",
    "# Collect length_*.pkl result files from the top level of Results/Finegrained/.\n",
    "for r, t, f in os.walk('Results/Finegrained/'):\n",
    "    for ff in f:\n",
    "        # Prefix/suffix checks instead of substring checks so names like\n",
    "        # 'length_x.pkl.bak' are not picked up.\n",
    "        if ff.startswith('length_') and ff.endswith('.pkl'):\n",
    "            file_name_list.append(ff)\n",
    "            print(ff)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:16:26.816043Z",
     "start_time": "2019-05-17T06:16:26.615897Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    \n",
    "    name2ppl[name] = []   \n",
    "    \n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 2000\n",
    "    for s in str_list:\n",
    "        str2ppl[s] = min(ppl_upper, lm.perplexity(s))\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    \n",
    "    name2ppl[name] = (np.mean(ppl_list[1000:]), np.mean(ppl_list[:1000]), ppl_list) \n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:16:32.354850Z",
     "start_time": "2019-05-17T06:16:29.163191Z"
    }
   },
   "outputs": [],
   "source": [
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 2000\n",
    "        \n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    acc_cnt = 0\n",
    "    for i in range(1000):\n",
    "        #print(res_class[i], np.argmax(C_original[i]))\n",
    "        if res_class[i] != np.argmax(C_original[i]):\n",
    "            acc_list.append(1.)\n",
    "            \n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    \n",
    "    \n",
    "    for i in range(1000, 2000):\n",
    "        #print(res_class[i], np.argmax(C_original[i]))\n",
    "        if res_class[i] != np.argmax(C_original[i-1000]):\n",
    "            acc_list.append(1.)\n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    name2acc[name] = acc_list \n",
    "    print(name, np.mean(acc_list))       \n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Increase"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:16:41.250708Z",
     "start_time": "2019-05-17T06:16:36.893196Z"
    }
   },
   "outputs": [],
   "source": [
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    \n",
    "    name2res[name] = dict()\n",
    "    acc_list = name2acc[name]\n",
    "    cnt_key = 0\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # Bug fix: str_list is appended to below but was never initialized in\n",
    "        # this cell (it silently reused a str_list leaked from an earlier cell\n",
    "        # and raises NameError on a fresh kernel).\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "            #acc, acc_list = name2acc[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            #acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, length, cnt, maxit = o\n",
    "            length_list.append(float(length) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "                #print(s, keywords[0])\n",
    "                \n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "                content = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "            \n",
    "        \n",
    "      \n",
    "            \n",
    "        \n",
    "        #if '3stage' in name and is_choose_first and '0.9' in name and 'ItFalse' in name and '0.1' in name:\n",
    "        #if is_choose_first and '0.1' in name:\n",
    "        if True:\n",
    "        #if 'BOW-04' in name and is_choose_first:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), np.mean(acc_list[1000:]), np.mean(ppl_list[1000:]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key / 10.))\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Decrease"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T04:43:22.362000Z",
     "start_time": "2019-05-17T04:43:20.967126Z"
    }
   },
   "outputs": [],
   "source": [
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    if len(res) < 1000:\n",
    "        continue\n",
    "    elif len(res) > 1000:\n",
    "        res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    \n",
    "    acc_list = name2acc[name]\n",
    "    cnt_key = 0\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # Bug fix: str_list is appended to below but was never initialized in\n",
    "        # this cell (it silently reused a str_list leaked from an earlier cell\n",
    "        # and raises NameError on a fresh kernel).\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "            #acc, acc_list = name2acc[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            #acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, length, cnt, maxit = o\n",
    "            length_list.append(float(length) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1                                                \n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "                content = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "            \n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), np.mean(acc_list[1000:]), np.mean(ppl_list[1000:]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key / 10.))\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Keywords"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:17:19.910870Z",
     "start_time": "2019-05-17T06:17:19.901572Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "file_name_list = []\n",
    "# Collect keywords_*.pkl result files from the top level of Results/Finegrained/.\n",
    "for r, t, f in os.walk('Results/Finegrained/'):\n",
    "    for ff in f:\n",
    "        # Prefix/suffix checks instead of substring checks so names like\n",
    "        # 'keywords_x.pkl.bak' are not picked up.\n",
    "        if ff.startswith('keywords_') and ff.endswith('.pkl'):\n",
    "            file_name_list.append(ff)\n",
    "            print(ff)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:17:20.439776Z",
     "start_time": "2019-05-17T06:17:20.347486Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    \n",
    "    name2ppl[name] = []   \n",
    "    \n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 1000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 1000\n",
    "    for s in str_list:\n",
    "        str2ppl[s] = min(ppl_upper, lm.perplexity(s))\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    \n",
    "    name2ppl[name] = (np.mean(ppl_list[500:]), np.mean(ppl_list[:500]), ppl_list) \n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Original"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:17:22.914177Z",
     "start_time": "2019-05-17T06:17:22.890251Z"
    }
   },
   "outputs": [],
   "source": [
    "replace_words = pickle.load(open('replace_words.pkl','rb'))\n",
    "cnt = 0\n",
    "\n",
    "for i in range(1000):\n",
    "    keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "    if len(keywords) == 0:\n",
    "        continue\n",
    "    if keywords[0] in idx2str(original[i]):\n",
    "        print(keywords[0], idx2str(original[i]))\n",
    "        cnt += 1\n",
    "print(cnt / float(len(original)) * 100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:17:24.018261Z",
     "start_time": "2019-05-17T06:17:23.267426Z"
    }
   },
   "outputs": [],
   "source": [
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 1000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 1000\n",
    "        \n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    acc_cnt = 0\n",
    "    for i in range(1000):\n",
    "        #print(res_class[i], np.argmax(C_original[i]))\n",
    "        if res_class[i] != np.argmax(C_original[i]):\n",
    "            acc_list.append(1.)\n",
    "            \n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    \n",
    "    name2acc[name] = acc_list \n",
    "    print(name, np.mean(acc_list))       \n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:17:26.639359Z",
     "start_time": "2019-05-17T06:17:24.889667Z"
    }
   },
   "outputs": [],
   "source": [
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    cnt_key = 0\n",
    "    res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # Bug fix: str_list is appended to below but was never initialized in\n",
    "        # this cell (it silently reused a str_list leaked from an earlier cell\n",
    "        # and raises NameError on a fresh kernel).\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "            #acc, acc_list = name2acc[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            #acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, length, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1                                                 \n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "                content = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "            \n",
    "    \n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[:1000]), np.mean(ppl_list[:1000]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/10.))\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sentiment Length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:18:08.731014Z",
     "start_time": "2019-05-17T06:18:08.722627Z"
    }
   },
   "outputs": [],
   "source": [
    "import os\n",
    "file_name_list = []\n",
    "# Collect sentiment_length_*.pkl result files from the top level of Results/Finegrained/.\n",
    "for r, t, f in os.walk('Results/Finegrained/'):\n",
    "    for ff in f:\n",
    "        # Prefix/suffix checks instead of substring checks so names like\n",
    "        # 'sentiment_length_x.pkl.bak' are not picked up.\n",
    "        if ff.startswith('sentiment_length_') and ff.endswith('.pkl'):\n",
    "            file_name_list.append(ff)\n",
    "            print(ff)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:18:24.826668Z",
     "start_time": "2019-05-17T06:18:24.698213Z"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    \n",
    "    name2ppl[name] = []   \n",
    "    \n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 2000\n",
    "    for s in str_list:\n",
    "        str2ppl[s] = min(ppl_upper, lm.perplexity(s))\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    \n",
    "    name2ppl[name] = (np.mean(ppl_list[1000:]), np.mean(ppl_list[:1000]), ppl_list) \n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:18:27.354487Z",
     "start_time": "2019-05-17T06:18:26.079561Z"
    }
   },
   "outputs": [],
   "source": [
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "        #print(t[0])\n",
    "            \n",
    "    assert len(str_list) == 2000\n",
    "        \n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    acc_cnt = 0\n",
    "    for i in range(1000):\n",
    "        #print(res_class[i], np.argmax(C_original[i]))\n",
    "        if res_class[i] != np.argmax(C_original[i]):\n",
    "            acc_list.append(1.)\n",
    "            \n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    \n",
    "    \n",
    "    for i in range(1000, 2000):\n",
    "        #print(res_class[i], np.argmax(C_original[i]))\n",
    "        if res_class[i] != np.argmax(C_original[i-1000]):\n",
    "            acc_list.append(1.)\n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    name2acc[name] = acc_list \n",
    "    print(name, np.mean(acc_list))       \n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))\n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### IncreaseSentiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:18:30.399792Z",
     "start_time": "2019-05-17T06:18:28.210943Z"
    }
   },
   "outputs": [],
   "source": [
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    cnt_key = 0\n",
    "    res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # Bug fix: str_list is appended to below but was never initialized in\n",
    "        # this cell (it silently reused a str_list leaked from an earlier cell\n",
    "        # and raises NameError on a fresh kernel).\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "            #acc, acc_list = name2acc[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            #acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, auc, length, cnt, maxit = o\n",
    "            length_list.append(float(length) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1                                                 \n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "                content = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "            \n",
    "    \n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[:1000]), np.mean(ppl_list[:1000]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/10.))\n",
    "    \n",
    "    \n",
    "    \n",
    "        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DecreaseSentiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T06:18:32.242996Z",
     "start_time": "2019-05-17T06:18:31.472740Z"
    }
   },
   "outputs": [],
   "source": [
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    \n",
    "    res = res[1000:]\n",
    "    name2res[name] = dict()\n",
    "    cnt_key = 0.\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # Bug fix: str_list is appended to below but was never initialized in\n",
    "        # this cell (it silently reused a str_list leaked from an earlier cell\n",
    "        # and raises NameError on a fresh kernel).\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "            #acc, acc_list = name2acc[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "            #acc, acc_list = name2acc[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, auc, length, cnt, maxit = o\n",
    "            length_list.append(float(length) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1                                                \n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "                content = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "            \n",
    "    \n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[1000:]), np.mean(ppl_list[1000:]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/ 10.))\n",
    "    \n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Sentiment Keywords"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:37.514473Z",
     "start_time": "2019-05-17T07:08:37.506141Z"
    }
   },
   "outputs": [],
   "source": [
    "# Gather keywords+sentiment result files from the top level of Results/Finegrained/.\n",
    "import os\n",
    "file_name_list = []\n",
    "for root, dirs, files in os.walk('Results/Finegrained/'):\n",
    "    file_name_list = [fname for fname in files\n",
    "                      if fname.startswith('keywordssentiment_') and '.pkl' in fname]\n",
    "    break\n",
    "for fname in file_name_list:\n",
    "    print(fname)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:38.115236Z",
     "start_time": "2019-05-17T07:08:37.811189Z"
    }
   },
   "outputs": [],
   "source": [
    "# Compute language-model perplexity for every generated sentence, caching by\n",
    "# sentence text. Saves (mean over samples[500:], mean over samples[:500], list).\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()  # cache: sentence -> clipped perplexity\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 1000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 1000\n",
    "    for s in str_list:\n",
    "        # BUGFIX: the cache entry was recomputed on every hit; compute on miss only.\n",
    "        if s not in str2ppl:\n",
    "            # clip extreme perplexities at 3000 so a few degenerate sentences\n",
    "            # do not dominate the mean\n",
    "            str2ppl[s] = np.min([3000, lm.perplexity(s)])\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    name2ppl[name] = (np.mean(ppl_list[500:]), np.mean(ppl_list[:500]), ppl_list)\n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:39.329079Z",
     "start_time": "2019-05-17T07:08:38.289770Z"
    }
   },
   "outputs": [],
   "source": [
    "# Classify each generated sentence and score 1 when the predicted label\n",
    "# differs from the original label (i.e. the attribute was flipped).\n",
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 1000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 1000\n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    for i in range(1000):\n",
    "        if res_class[i] != np.argmax(C_original[i]):\n",
    "            acc_list.append(1.)\n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    name2acc[name] = acc_list\n",
    "    print(name, np.mean(acc_list))\n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:41.370294Z",
     "start_time": "2019-05-17T07:08:39.332118Z"
    }
   },
   "outputs": [],
   "source": [
    "# Increase direction: aggregate metrics over the first 1000 samples of each\n",
    "# result file and print one LaTeX table row per file.\n",
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    cnt_key = 0\n",
    "    res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # BUGFIX: str_list was appended to below without being reset here, so it\n",
    "        # silently accumulated sentences left over from previously-run cells.\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, auc, length, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            # first replaced keyword for this sample, if any\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            # cnt_key/10. == percentage over the 1000 increase-direction samples\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[:1000]), np.mean(ppl_list[:1000]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/10.))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Keywords Length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:46.802820Z",
     "start_time": "2019-05-17T07:08:46.791524Z"
    }
   },
   "outputs": [],
   "source": [
    "# Gather keywords+length result files from the top level of Results/Finegrained/.\n",
    "import os\n",
    "file_name_list = []\n",
    "for root, dirs, files in os.walk('Results/Finegrained/'):\n",
    "    file_name_list = [fname for fname in files\n",
    "                      if fname.startswith('keywordslength_') and '.pkl' in fname]\n",
    "    break\n",
    "for fname in file_name_list:\n",
    "    print(fname)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:48.605669Z",
     "start_time": "2019-05-17T07:08:48.049482Z"
    }
   },
   "outputs": [],
   "source": [
    "# Compute language-model perplexity for every generated sentence, caching by\n",
    "# sentence text. res holds 1000 + 1000 samples (one block per direction);\n",
    "# saves (mean over samples[1000:], mean over samples[:1000], list).\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()  # cache: sentence -> clipped perplexity\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 2000\n",
    "    for s in str_list:\n",
    "        # BUGFIX: the cache entry was recomputed on every hit; compute on miss only.\n",
    "        if s not in str2ppl:\n",
    "            # clip extreme perplexities at 3000 so a few degenerate sentences\n",
    "            # do not dominate the mean\n",
    "            str2ppl[s] = np.min([3000, lm.perplexity(s)])\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    name2ppl[name] = (np.mean(ppl_list[1000:]), np.mean(ppl_list[:1000]), ppl_list)\n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:50.580224Z",
     "start_time": "2019-05-17T07:08:48.655140Z"
    }
   },
   "outputs": [],
   "source": [
    "# Classify generated sentences; score 1 when the predicted label differs from\n",
    "# the original (attribute successfully changed). Rows [1000:2000] reuse the\n",
    "# same 1000 originals (second direction).\n",
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 2000\n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    # merged the two copy-pasted loops; i % 1000 maps each row to its original\n",
    "    for i in range(2000):\n",
    "        if res_class[i] != np.argmax(C_original[i % 1000]):\n",
    "            acc_list.append(1.)\n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    name2acc[name] = acc_list\n",
    "    print(name, np.mean(acc_list))\n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### IncreaseSentiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:53.846559Z",
     "start_time": "2019-05-17T07:08:50.585186Z"
    }
   },
   "outputs": [],
   "source": [
    "# Increase direction: aggregate metrics over the first 1000 samples of each\n",
    "# result file and print one LaTeX table row per file.\n",
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    cnt_key = 0\n",
    "    res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # BUGFIX: str_list was appended to below without being reset here, so it\n",
    "        # silently accumulated sentences left over from previously-run cells.\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, auc, length, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            # first replaced keyword for this sample, if any\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            # cnt_key/10. == percentage over the 1000 increase-direction samples\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[:1000]), np.mean(ppl_list[:1000]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/10.))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DecreaseSentiment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:08:55.021835Z",
     "start_time": "2019-05-17T07:08:53.849600Z"
    }
   },
   "outputs": [],
   "source": [
    "# Decrease direction: aggregate metrics over the second half (samples[1000:])\n",
    "# of each result file and print one LaTeX table row per file.\n",
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    res = res[1000:]\n",
    "    name2res[name] = dict()\n",
    "    cnt_key = 0.\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # BUGFIX: str_list was appended to below without being reset here, so it\n",
    "        # silently accumulated sentences left over from previously-run cells.\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            s, auc, length, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            # first replaced keyword for this sample, if any\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            # cnt_key / 10. == percentage over the 1000 decrease-direction samples\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[1000:]), np.mean(ppl_list[1000:]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/ 10.))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Multi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:05:03.504865Z",
     "start_time": "2019-05-17T07:05:03.490035Z"
    }
   },
   "outputs": [],
   "source": [
    "# Gather completed multi-attribute result files (exactly 2000 entries each)\n",
    "# from the top level of Results/Finegrained/.\n",
    "import os\n",
    "file_name_list = []\n",
    "for root, dirs, files in os.walk('Results/Finegrained/'):\n",
    "    for fname in files:\n",
    "        if not (fname.startswith('multi_') and '.pkl' in fname):\n",
    "            continue\n",
    "        res = pickle.load(open('Results/Finegrained/' + fname, 'rb'))\n",
    "        if len(res) == 2000:\n",
    "            file_name_list.append(fname)\n",
    "            print(fname)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:07:30.603377Z",
     "start_time": "2019-05-17T07:07:30.524445Z"
    }
   },
   "outputs": [],
   "source": [
    "# Compute language-model perplexity for every generated sentence (multi-attribute\n",
    "# experiments), caching by sentence text. res holds 1000 + 1000 samples;\n",
    "# saves (mean over samples[1000:], mean over samples[:1000], list).\n",
    "name2ppl = dict()\n",
    "str2ppl = dict()  # cache: sentence -> clipped perplexity\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    ppl_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 2000\n",
    "    for s in str_list:\n",
    "        # BUGFIX: the cache entry was recomputed on every hit; compute on miss only.\n",
    "        if s not in str2ppl:\n",
    "            # clip extreme perplexities at 3000 so a few degenerate sentences\n",
    "            # do not dominate the mean\n",
    "            str2ppl[s] = np.min([3000, lm.perplexity(s)])\n",
    "        ppl_list.append(str2ppl[s])\n",
    "    name2ppl[name] = (np.mean(ppl_list[1000:]), np.mean(ppl_list[:1000]), ppl_list)\n",
    "    print(name, np.mean(ppl_list))\n",
    "pickle.dump(name2ppl, open('Results/Finegrained/metrics/name2ppl.pkl','wb'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:07:32.767184Z",
     "start_time": "2019-05-17T07:07:32.494353Z"
    }
   },
   "outputs": [],
   "source": [
    "# Classify generated sentences; score 1 when the predicted label differs from\n",
    "# the original (attribute successfully changed). Rows [1000:2000] reuse the\n",
    "# same 1000 originals (second direction).\n",
    "name2acc = dict()\n",
    "for name in file_name_list:\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    acc_list = []\n",
    "    str_list = []\n",
    "    assert len(res) == 2000\n",
    "    for i,t in enumerate(res):\n",
    "        str_list.append(t[0])\n",
    "    assert len(str_list) == 2000\n",
    "    idx_list = [[w2id_all[idx] for idx in s.split()] + [w2id_all['<EOS>']] for s in str_list]\n",
    "    pad_x, length_list = pad(idx_list, w2id_all['<PAD>'],move_go=False)\n",
    "    res_class = B.sess.run(B.predictions, {B.input_x: pad_x, \n",
    "                                           B.X_seq_len:length_list,\n",
    "                                           B.output_keep_prob:1.0,\n",
    "                                           B.input_keep_prob:1.0})\n",
    "    # merged the two copy-pasted loops; i % 1000 maps each row to its original\n",
    "    for i in range(2000):\n",
    "        if res_class[i] != np.argmax(C_original[i % 1000]):\n",
    "            acc_list.append(1.)\n",
    "        else:\n",
    "            acc_list.append(0.)\n",
    "    name2acc[name] = acc_list\n",
    "    print(name, np.mean(acc_list))\n",
    "pickle.dump(name2acc, open('Results/Finegrained/metrics/name2acc.pkl','wb'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### IncreaseMulti"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:07:34.738651Z",
     "start_time": "2019-05-17T07:07:34.309270Z"
    }
   },
   "outputs": [],
   "source": [
    "# IncreaseMulti: aggregate metrics over the first 1000 samples of each\n",
    "# result file and print one LaTeX table row per file.\n",
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    cnt_key = 0\n",
    "    res = res[:1000]\n",
    "    name2res[name] = dict()\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # BUGFIX: str_list was appended to below without being reset here, so it\n",
    "        # silently accumulated sentences left over from previously-run cells.\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            # multi-attribute results carry an extra `flag` field\n",
    "            s, auc, length, flag, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            # first replaced keyword for this sample, if any\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            # cnt_key/10. == percentage over the 1000 increase-direction samples\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[:1000]), np.mean(ppl_list[:1000]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/10.))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### DecreaseMulti"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2019-05-17T07:07:36.815691Z",
     "start_time": "2019-05-17T07:07:36.665107Z"
    }
   },
   "outputs": [],
   "source": [
    "# DecreaseMulti: aggregate metrics over the second half (samples[1000:]) of\n",
    "# each result file and print one LaTeX table row per file.\n",
    "name2ppl = pickle.load(open('Results/Finegrained/metrics/name2ppl.pkl','rb'))\n",
    "name2acc = pickle.load(open('Results/Finegrained/metrics/name2acc.pkl','rb'))\n",
    "name2res = dict()\n",
    "name2content = dict()\n",
    "for name in file_name_list:\n",
    "    acc_list = name2acc[name]\n",
    "    res = pickle.load(open('Results/Finegrained/' + name, 'rb'))\n",
    "    res = res[1000:]\n",
    "    name2res[name] = dict()\n",
    "    cnt_key = 0.\n",
    "    for is_choose_first in [True]:\n",
    "        length_list = []\n",
    "        max_it_list = []\n",
    "        content_list = []\n",
    "        content_pc_list = []\n",
    "        succ_list = []\n",
    "        # BUGFIX: str_list was appended to below without being reset here, so it\n",
    "        # silently accumulated sentences left over from previously-run cells.\n",
    "        str_list = []\n",
    "        word_overlap_list = []\n",
    "        edit_distance_list = []\n",
    "        if is_choose_first:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]\n",
    "        else:\n",
    "            ppl_neg, ppl_pos, ppl_list = name2ppl[name]['last']\n",
    "        for i,o in enumerate(res):\n",
    "            # multi-attribute results carry an extra `flag` field\n",
    "            s, auc, length, flag, cnt, maxit = o\n",
    "            length_list.append(float(len(s.split())) / len(original[i]))\n",
    "            str_list.append(s)\n",
    "            max_it_list.append(maxit)\n",
    "            word_overlap, edit_distance = word_overlap_edit(s, idx2str(original[i]))\n",
    "            word_overlap_list.append(word_overlap)\n",
    "            edit_distance_list.append(edit_distance)\n",
    "            # first replaced keyword for this sample, if any\n",
    "            keywords = [replace_words[t[0]] for t in original_noun[i]][:1]\n",
    "            if len(keywords) == 0:\n",
    "                continue\n",
    "            if keywords[0] in s:\n",
    "                cnt_key += 1\n",
    "            if cnt > 0:\n",
    "                content = 1.\n",
    "                content_list.append(content)\n",
    "            else:\n",
    "                content = 0.\n",
    "                content_list.append(content)\n",
    "            if len(original_noun[i]) == 0:\n",
    "                content_pc = 1.\n",
    "            else:\n",
    "                content_pc = float(cnt) / len(original_noun[i])\n",
    "            content_pc_list.append(content_pc)\n",
    "        if True:\n",
    "            name2content[name] = np.mean(content_list) * 100\n",
    "            # cnt_key / 10. == percentage over the 1000 decrease-direction samples\n",
    "            print('%s & %.1f & %.1f & %.1f & %.1f & %.1f & %.1f\\\\\\\\' % (name.replace('_','-'), 100.* np.mean(acc_list[1000:]), np.mean(ppl_list[1000:]), np.mean(word_overlap_list) * 100, np.mean(content_pc_list) * 100, np.mean(length_list) * 100, cnt_key/ 10.))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "165px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
