{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pathlib\n",
    "import pickle\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "gpu_id='0'\n",
     "def init_env():\n",
     "    '''Expose only the GPU named by the module-level gpu_id to CUDA/TensorFlow.'''\n",
     "    os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
     "    os.environ['CUDA_VISIBLE_DEVICES']=gpu_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "init_env()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import KFold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 构建一个给词编码的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class WordsNameNumber:\n",
    "    '''\n",
    "    输入一个篇章 处理 得到词频字典\n",
    "    对字典进行排序 按照词频由大到小\n",
    "    按照排序结果给词进行编码 由 1 开始（便于在cnn中使用）\n",
    "    可以设置停用词词典 若有词典则在进行排序时就删除这些词\n",
    "    '''\n",
    "    def __init__(self,data=None,stopwords=None,tokenizer=None):\n",
    "        '''\n",
    "        输入有三个\n",
    "        data 表示输入的篇章 和sklearn中的feature_extraction保持一致\n",
    "        输入一个list list中为string句子 词之间用空格分开\n",
    "        stopwords 为一个list 一个元素代表一个停用词\n",
    "        tokenizer表示分词器 默认用空格进行分词 可以传入一个函数 之后会用于进行分词\n",
    "        '''\n",
    "        self.data = data\n",
    "        self.stopwords = stopwords\n",
    "        self.tokenizer = tokenizer\n",
    "        \n",
    "        self.wordsdict = {}\n",
    "        self.numdict = {}\n",
    "    def fit(self,data=None):\n",
    "        '''\n",
    "        构建词典\n",
    "        之后根据stopwords进行修正\n",
    "        之后按照词频排序\n",
    "        之后输出编码词典\n",
    "        '''\n",
    "        if not None == data:\n",
    "            self.data = data\n",
    "        if None == self.data:\n",
    "            print('No data')\n",
    "            return {}\n",
    "        self.__check()\n",
    "        for line in self.data:\n",
    "            words = self.tokenizer(line)\n",
    "            if len(words) == 0:\n",
    "                continue\n",
    "            for word in words:\n",
    "                if word in self.stopwords:\n",
    "                    continue\n",
    "                if not word in self.wordsdict.keys():\n",
    "                    self.wordsdict[word] = 1\n",
    "                else:\n",
    "                    self.wordsdict[word] += 1\n",
    "        wordsList = sorted(self.wordsdict,key=lambda x:self.wordsdict[x],reverse=True)\n",
    "        for num,key in enumerate(wordsList,start=1):\n",
    "            self.numdict[key] = num\n",
    "        print('Complete!')\n",
    "    def transform(self,data,padding=False):\n",
    "        '''\n",
    "        输入一个篇章 使用tokenizer进行分词之后\n",
    "        按照已经fit得到的编号字典对篇章进行编号\n",
    "        输出一个词编号的矩阵 格式为numpy.array\n",
    "        每一行表示一个篇章中的句子的编码形式\n",
    "        当padding为True时 输出需要保持所有的行长度一致\n",
    "        此处需要注意 当transform输入的词存在集外词的时候 默认将其编号为0\n",
    "        '''\n",
    "        if None == data:\n",
    "            print('No data')\n",
    "            return np.array([])\n",
    "        if not type(data) == list:\n",
    "            data = [data]\n",
    "        self.__check()\n",
    "        numMatrix = []\n",
    "        maxLen = 0\n",
    "        for line in data:\n",
    "            words = self.tokenizer(line)\n",
    "            maxLen = len(words) if maxLen < len(words) else maxLen\n",
    "            numMatrix.append([0 if i not in self.numdict.keys() else self.numdict[i] for i in words])\n",
    "        if True == padding:\n",
    "            for num,_ in enumerate(numMatrix):\n",
    "                numMatrix[num] += (maxLen - len(numMatrix[num]))*[0]\n",
    "        return np.array(numMatrix)\n",
    "    \n",
    "    def __check(self):\n",
    "        '''\n",
    "        用于检查目前将要执行的操作是否条件完备\n",
    "        一些程序运行共有条件的check机制\n",
    "        主要是类中的tokenizer是否正确\n",
    "        '''\n",
    "        if None == self.tokenizer:\n",
    "            self.tokenizer = lambda x:x.split(' ')\n",
    "        if None == self.stopwords:\n",
    "            self.stopwords = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 语料准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "content = [line.strip() for line in open('./data/DoubanZH.txt','r',encoding='utf-8').readlines()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "label = []\n",
    "sentence = []\n",
    "for line in content:\n",
    "    l,s = line.split(',')\n",
    "    label.append(0 if l=='10' else 1)\n",
    "    sentence.append(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = pd.DataFrame(data=[i for i in zip(sentence,label)],columns=['sentence','label'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "document = [i for i in df['sentence']]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 词编号 为cnn结构进行准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "wnn = WordsNameNumber()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Complete!\n"
     ]
    }
   ],
   "source": [
    "wnn.fit(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "numbering = wnn.transform(document,padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(274012, 192)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "numbering.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# tfidf 特征构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "tfidf = TfidfVectorizer(max_features=1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "TfidfVectorizer(analyzer='word', binary=False, decode_error='strict',\n",
       "        dtype=<class 'numpy.int64'>, encoding='utf-8', input='content',\n",
       "        lowercase=True, max_df=1.0, max_features=1000, min_df=1,\n",
       "        ngram_range=(1, 1), norm='l2', preprocessor=None, smooth_idf=True,\n",
       "        stop_words=None, strip_accents=None, sublinear_tf=False,\n",
       "        token_pattern='(?u)\\\\b\\\\w\\\\w+\\\\b', tokenizer=None, use_idf=True,\n",
       "        vocabulary=None)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tfidf.fit(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "tfidfFeature = tfidf.transform(document)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(274012, 1000)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tfidfFeature.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用tfidf特征 输入MLP中"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 首先进行训练集测试集划分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tfidfFeature = tfidfFeature[:10000]\n",
    "# label = label[:10000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "border = int(tfidfFeature.shape[0] * 0.9)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
     "# reshape labels into a column vector so they match the\n",
     "# tf.placeholder of shape [None,1] defined below\n",
     "label = np.array([[i] for i in label])\n",
     "\n",
     "# simple 90/10 head/tail split at `border` (no shuffling)\n",
     "train_x =tfidfFeature[:border]\n",
     "test_x = tfidfFeature[border:]\n",
     "\n",
     "train_y = label[:border]\n",
     "test_y = label[border:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "# 定义一个取样本batch的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "class batcher:\n",
    "    '''\n",
    "    分batch\n",
    "    输入数据 根据容量上限进行batch划分\n",
    "    需要指定batch_size\n",
    "    '''\n",
    "    def __init__(self,seed=17):\n",
    "        '''\n",
    "        初始化 设定随机数种子\n",
    "        以及取用数据时的集合\n",
    "        '''\n",
    "        np.random.seed(seed)\n",
    "        \n",
    "    def __randint(self,maxlen,batch_size):\n",
    "        '''\n",
    "        输出batch_size的随机数\n",
    "        '''\n",
    "        number = []\n",
    "        while len(number) < batch_size:\n",
    "            rd = np.random.randint(0,maxlen)\n",
    "            if not rd in number:\n",
    "                number.append(rd)\n",
    "        return number\n",
    "    \n",
    "    def get_batch(self,data,label,batch_size):\n",
    "        '''\n",
    "        输出给定的数据的batch_size大小的对象\n",
    "        '''\n",
    "        feature = data\n",
    "        mark = np.array(label)\n",
    "        maxlen = len(label)\n",
    "        randint = self.__randint(maxlen,batch_size,)\n",
    "        return feature[randint],mark[randint]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# class batcher:\n",
    "#     '''\n",
    "#     分batch\n",
    "#     输入数据 根据容量上限进行batch划分\n",
    "#     需要指定batch_size\n",
    "#     '''\n",
    "#     def __init__(self,seed=17):\n",
    "#         '''\n",
    "#         初始化 设定随机数种子\n",
    "#         以及取用数据时的集合\n",
    "#         '''\n",
    "#         self.stop_set = []\n",
    "#         np.random.seed(seed)\n",
    "        \n",
    "#     def __randint(self,maxlen,batch_size):\n",
    "#         '''\n",
    "#         输出batch_size的随机数\n",
    "#         '''\n",
    "#         if len(self.stop_set) == maxlen:\n",
    "#             self.stop_set = []\n",
    "#         number = []\n",
    "#         while len(number) < batch_size:\n",
    "#             rd = np.random.randint(0,maxlen)\n",
    "#             if not rd in number and not rd in self.stop_set:\n",
    "#                 number.append(rd)\n",
    "#         self.stop_set += number\n",
    "#         return number\n",
    "    \n",
    "#     def get_batch(self,data,label,batch_size):\n",
    "#         '''\n",
    "#         输出给定的数据的batch_size大小的对象\n",
    "#         '''\n",
    "#         feature = data\n",
    "#         mark = np.array(label)\n",
    "#         maxlen = len(label)\n",
    "#         randint = self.__randint(maxlen,batch_size,)\n",
    "#         return feature[randint],mark[randint]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "bcher_train = batcher()\n",
    "bcher_valid = batcher()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Define input placeholders"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = tf.placeholder(tf.float32,shape=[None,1000])\n",
    "y = tf.placeholder(tf.float32,shape=[None,1])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_lr(x):\n",
    "    '''\n",
    "    返回一个lr模型\n",
    "    '''\n",
    "    w = tf.Variable(tf.random_normal([1000,1]))\n",
    "    b = tf.Variable(tf.random_normal([1]))\n",
    "\n",
    "    y_ = tf.nn.sigmoid(tf.add(tf.matmul(x,w),b))\n",
    "    \n",
    "    return y_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_mlp(x):\n",
    "    w_h1 = tf.Variable(tf.random_normal([1000,128]))\n",
    "    b_h1 = tf.Variable(tf.random_normal([128]))\n",
    "\n",
    "    h1 = tf.nn.sigmoid(tf.add(tf.matmul(x,w_h1),b_h1))\n",
    "    \n",
    "    \n",
    "    w_out = tf.Variable(tf.random_normal([128,1]))\n",
    "    b_out = tf.Variable(tf.random_normal([1]))\n",
    "    \n",
    "    y_ = tf.nn.sigmoid(tf.add(tf.matmul(h1,w_out),b_out))\n",
    "    return y_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和测试过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0\n",
      "loss:0.6139430999755859\n",
      "Manual ACC: 0.666015625\n",
      "Train ACC: 0.6953125\n",
      "Test ACC: 0.7265523\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 20\n",
      "loss:0.39328354597091675\n",
      "Manual ACC: 0.82421875\n",
      "Train ACC: 0.8203125\n",
      "Test ACC: 0.8235159\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 40\n",
      "loss:0.3992350995540619\n",
      "Manual ACC: 0.822265625\n",
      "Train ACC: 0.82421875\n",
      "Test ACC: 0.8288804\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 60\n",
      "loss:0.3457793891429901\n",
      "Manual ACC: 0.833984375\n",
      "Train ACC: 0.8378906\n",
      "Test ACC: 0.82789516\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 80\n",
      "loss:0.344027578830719\n",
      "Manual ACC: 0.84375\n",
      "Train ACC: 0.8515625\n",
      "Test ACC: 0.8249756\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 100\n",
      "loss:0.3479865789413452\n",
      "Manual ACC: 0.8359375\n",
      "Train ACC: 0.8535156\n",
      "Test ACC: 0.82847905\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 120\n",
      "loss:0.3959735035896301\n",
      "Manual ACC: 0.82421875\n",
      "Train ACC: 0.8300781\n",
      "Test ACC: 0.82807755\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 140\n",
      "loss:0.36701685190200806\n",
      "Manual ACC: 0.818359375\n",
      "Train ACC: 0.8222656\n",
      "Test ACC: 0.8288804\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 160\n",
      "loss:0.3903234601020813\n",
      "Manual ACC: 0.828125\n",
      "Train ACC: 0.8261719\n",
      "Test ACC: 0.82760316\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 180\n",
      "loss:0.33218616247177124\n",
      "Manual ACC: 0.849609375\n",
      "Train ACC: 0.84765625\n",
      "Test ACC: 0.82942784\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 200\n",
      "loss:0.3447932004928589\n",
      "Manual ACC: 0.8515625\n",
      "Train ACC: 0.8496094\n",
      "Test ACC: 0.8293913\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 220\n",
      "loss:0.3676448464393616\n",
      "Manual ACC: 0.8359375\n",
      "Train ACC: 0.8339844\n",
      "Test ACC: 0.8294643\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 240\n",
      "loss:0.3660311698913574\n",
      "Manual ACC: 0.8359375\n",
      "Train ACC: 0.83984375\n",
      "Test ACC: 0.8290264\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 260\n",
      "loss:0.3464682102203369\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.8359375\n",
      "Test ACC: 0.8293184\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 280\n",
      "loss:0.3569771945476532\n",
      "Manual ACC: 0.830078125\n",
      "Train ACC: 0.84765625\n",
      "Test ACC: 0.8293548\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 300\n",
      "loss:0.3617563843727112\n",
      "Manual ACC: 0.830078125\n",
      "Train ACC: 0.8359375\n",
      "Test ACC: 0.8288075\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 320\n",
      "loss:0.36401182413101196\n",
      "Manual ACC: 0.8359375\n",
      "Train ACC: 0.83203125\n",
      "Test ACC: 0.82906294\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 340\n",
      "loss:0.35680344700813293\n",
      "Manual ACC: 0.845703125\n",
      "Train ACC: 0.84765625\n",
      "Test ACC: 0.83015776\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 360\n",
      "loss:0.3545486330986023\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.8417969\n",
      "Test ACC: 0.8282965\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 380\n",
      "loss:0.3553462028503418\n",
      "Manual ACC: 0.845703125\n",
      "Train ACC: 0.8496094\n",
      "Test ACC: 0.8280046\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 400\n",
      "loss:0.3800435960292816\n",
      "Manual ACC: 0.826171875\n",
      "Train ACC: 0.82421875\n",
      "Test ACC: 0.82888037\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 420\n",
      "loss:0.3630647361278534\n",
      "Manual ACC: 0.83203125\n",
      "Train ACC: 0.8222656\n",
      "Test ACC: 0.82300496\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 440\n",
      "loss:0.34459054470062256\n",
      "Manual ACC: 0.8359375\n",
      "Train ACC: 0.8457031\n",
      "Test ACC: 0.83034027\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 460\n",
      "loss:0.3579730689525604\n",
      "Manual ACC: 0.837890625\n",
      "Train ACC: 0.8417969\n",
      "Test ACC: 0.8299023\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 480\n",
      "loss:0.3569902181625366\n",
      "Manual ACC: 0.8203125\n",
      "Train ACC: 0.82421875\n",
      "Test ACC: 0.8286615\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 500\n",
      "loss:0.3760109543800354\n",
      "Manual ACC: 0.82421875\n",
      "Train ACC: 0.83203125\n",
      "Test ACC: 0.82950085\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 520\n",
      "loss:0.34782931208610535\n",
      "Manual ACC: 0.8515625\n",
      "Train ACC: 0.8515625\n",
      "Test ACC: 0.8288439\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 540\n",
      "loss:0.36323657631874084\n",
      "Manual ACC: 0.849609375\n",
      "Train ACC: 0.8574219\n",
      "Test ACC: 0.82953733\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 560\n",
      "loss:0.3470129370689392\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.8359375\n",
      "Test ACC: 0.83096063\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 580\n",
      "loss:0.31633129715919495\n",
      "Manual ACC: 0.845703125\n",
      "Train ACC: 0.8496094\n",
      "Test ACC: 0.8316175\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 600\n",
      "loss:0.29299479722976685\n",
      "Manual ACC: 0.8828125\n",
      "Train ACC: 0.8847656\n",
      "Test ACC: 0.8297197\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 620\n",
      "loss:0.32143956422805786\n",
      "Manual ACC: 0.859375\n",
      "Train ACC: 0.87109375\n",
      "Test ACC: 0.82986575\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 640\n",
      "loss:0.33206868171691895\n",
      "Manual ACC: 0.859375\n",
      "Train ACC: 0.8496094\n",
      "Test ACC: 0.8295374\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 660\n",
      "loss:0.32805365324020386\n",
      "Manual ACC: 0.869140625\n",
      "Train ACC: 0.87109375\n",
      "Test ACC: 0.8311431\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 680\n",
      "loss:0.36007654666900635\n",
      "Manual ACC: 0.833984375\n",
      "Train ACC: 0.8300781\n",
      "Test ACC: 0.8244647\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 700\n",
      "loss:0.3317069411277771\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.8417969\n",
      "Test ACC: 0.83121604\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 720\n",
      "loss:0.3458799123764038\n",
      "Manual ACC: 0.849609375\n",
      "Train ACC: 0.84375\n",
      "Test ACC: 0.8284061\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 740\n",
      "loss:0.39066410064697266\n",
      "Manual ACC: 0.822265625\n",
      "Train ACC: 0.8417969\n",
      "Test ACC: 0.8223116\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 760\n",
      "loss:0.3302682936191559\n",
      "Manual ACC: 0.849609375\n",
      "Train ACC: 0.8574219\n",
      "Test ACC: 0.8258515\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 780\n",
      "loss:0.3301768898963928\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.86328125\n",
      "Test ACC: 0.82782215\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 800\n",
      "loss:0.310603529214859\n",
      "Manual ACC: 0.841796875\n",
      "Train ACC: 0.8515625\n",
      "Test ACC: 0.8293914\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 820\n",
      "loss:0.3567085564136505\n",
      "Manual ACC: 0.818359375\n",
      "Train ACC: 0.828125\n",
      "Test ACC: 0.8253405\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 840\n",
      "loss:0.3096289336681366\n",
      "Manual ACC: 0.85546875\n",
      "Train ACC: 0.8691406\n",
      "Test ACC: 0.8268733\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 860\n",
      "loss:0.2917242646217346\n",
      "Manual ACC: 0.873046875\n",
      "Train ACC: 0.8691406\n",
      "Test ACC: 0.8293184\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 880\n",
      "loss:0.3061753511428833\n",
      "Manual ACC: 0.857421875\n",
      "Train ACC: 0.86328125\n",
      "Test ACC: 0.82826006\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 900\n",
      "loss:nan\n",
      "Manual ACC: 0.609375\n",
      "Train ACC: 0.0\n",
      "Test ACC: 0.0\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 920\n",
      "loss:nan\n",
      "Manual ACC: 0.6171875\n",
      "Train ACC: 0.0\n",
      "Test ACC: 0.0\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 940\n",
      "loss:nan\n",
      "Manual ACC: 0.634765625\n",
      "Train ACC: 0.0\n",
      "Test ACC: 0.0\n",
      "-----------------------------------------------------------------------\n",
      "epoch: 960\n",
      "loss:nan\n",
      "Manual ACC: 0.634765625\n",
      "Train ACC: 0.0\n",
      "Test ACC: 0.0\n",
      "-----------------------------------------------------------------------\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 980\n",
      "loss:nan\n",
      "Manual ACC: 0.662109375\n",
      "Train ACC: 0.0\n",
      "Test ACC: 0.0\n",
      "-----------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "\n",
    "with tf.device('/gpu:0'):\n",
    "    #给定一个模型\n",
    "#     y_ = model_lr(x)\n",
    "    \n",
    "    y_ = model_mlp(x)\n",
    "    \n",
    "    loss = - tf.reduce_sum(y * tf.log(y_) + (1- y) * tf.log(1 - y_),1)\n",
    "    loss_monitor = tf.reduce_mean(loss)\n",
    "    \n",
    "    learn_rate = 0.001\n",
    "    \n",
    "    train_step = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)\n",
    "    \n",
    "    correct_prediction = tf.equal(tf.cast(y,tf.float32),tf.round(y_))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "    \n",
    "    \n",
    "with tf.Session() as  s:\n",
    "    s.run(tf.global_variables_initializer())\n",
    "    \n",
    "    for i in range(1000):\n",
    "        for p in range(0,int(len(train_y)/512)):\n",
    "            batch_x,batch_y = bcher_train.get_batch(train_x,train_y,512)\n",
    "            \n",
    "            _,loss_m,yhat = s.run([train_step,loss_monitor,y_],feed_dict = {x:batch_x.todense(),y:batch_y})\n",
    "            \n",
    "        if i % 20 == 0:\n",
    "            print('epoch:',i) \n",
    "            print('loss:{0}'.format(loss_m))\n",
    "#             print('loss:{0}'.format(lr))\n",
    "#             batch_test_x,batch_test_y = bcher_valid.get_batch(test_x,test_y,128)\n",
    "            \n",
    "            print('Manual ACC:',accuracy_score(batch_y,[1 if i >0.5 else 0 for i in yhat]))\n",
    "            print('Train ACC:',accuracy.eval(feed_dict = {x:batch_x.todense(),y:batch_y}))\n",
    "            print('Test ACC:',accuracy.eval(feed_dict = {x:test_x.todense(),y:test_y}))\n",
    "            print('-----------------------------------------------------------------------')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# sklearn LR 对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.linear_model import LogisticRegression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [],
   "source": [
    "lr = LogisticRegression()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py:578: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n",
      "  y = column_or_1d(y, warn=True)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n",
       "          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,\n",
       "          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n",
       "          verbose=0, warm_start=False)"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lr.fit(train_x,train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_y = lr.predict(test_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.8306328005255091"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "accuracy_score(pred_y,test_y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# lightgbm 对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "import lightgbm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "lgb = lightgbm.LGBMClassifier()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/label.py:95: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n",
      "  y = column_or_1d(y, warn=True)\n",
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/label.py:128: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n",
      "  y = column_or_1d(y, warn=True)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,\n",
       "        learning_rate=0.1, max_depth=-1, min_child_samples=20,\n",
       "        min_child_weight=0.001, min_split_gain=0.0, n_estimators=100,\n",
       "        n_jobs=-1, num_leaves=31, objective=None, random_state=None,\n",
       "        reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0,\n",
       "        subsample_for_bin=200000, subsample_freq=1)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lgb.fit(train_x,train_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/lawbda/env/anaconda3/lib/python3.6/site-packages/sklearn/preprocessing/label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n",
      "  if diff:\n"
     ]
    }
   ],
   "source": [
    "lgb_y = lgb.predict(test_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.8218743157433764"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "accuracy_score(lgb_y,test_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
