{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import pathlib\n",
    "import pickle\n",
    "import os"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 神经网络初始化用的一些"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "gpu_id = '0'  # CUDA device index exposed to TensorFlow (string form, as CUDA expects)\n",
    "\n",
    "def init_env(gpu=None):\n",
    "    '''\n",
    "    Restrict the process to a single CUDA device.\n",
    "\n",
    "    gpu : str, optional -- device index to expose; falls back to the\n",
    "    module-level ``gpu_id`` when omitted, so existing ``init_env()``\n",
    "    calls keep working unchanged.\n",
    "    '''\n",
    "    # PCI_BUS_ID makes CUDA device numbering match nvidia-smi's ordering.\n",
    "    os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n",
    "    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id if gpu is None else gpu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "init_env()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import KFold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 构建一个给词编码的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class WordsNameNumber:\n",
    "    '''\n",
    "    Frequency-ranked integer encoder for words.\n",
    "\n",
    "    Counts word frequencies over a corpus, sorts descending, and assigns\n",
    "    each word an id starting at 1 (0 is reserved for padding and\n",
    "    out-of-vocabulary words, which is convenient for CNN embeddings).\n",
    "    An optional stopword list is excluded from the vocabulary.\n",
    "    '''\n",
    "    def __init__(self, data=None, stopwords=None, tokenizer=None):\n",
    "        '''\n",
    "        data      : list of str, one sentence per element (same input\n",
    "                    convention as sklearn feature_extraction).\n",
    "        stopwords : list of str to drop when building the vocabulary.\n",
    "        tokenizer : callable str -> list of str; defaults to splitting\n",
    "                    on single spaces.\n",
    "        '''\n",
    "        self.data = data\n",
    "        self.stopwords = stopwords\n",
    "        self.tokenizer = tokenizer\n",
    "        self.wordsdict = {}  # word -> frequency\n",
    "        self.numdict = {}    # word -> 1-based id (frequency rank)\n",
    "\n",
    "    def fit(self, data=None):\n",
    "        '''\n",
    "        Count word frequencies (skipping stopwords), rank by frequency,\n",
    "        and fill ``numdict`` with 1-based ids.  Prints 'Complete!' on\n",
    "        success; prints 'No data' and returns {} when no corpus is set.\n",
    "        '''\n",
    "        if data is not None:\n",
    "            self.data = data\n",
    "        if self.data is None:\n",
    "            print('No data')\n",
    "            return {}\n",
    "        self.__check()\n",
    "        stopset = set(self.stopwords)  # O(1) membership instead of list scan\n",
    "        for line in self.data:\n",
    "            for word in self.tokenizer(line):\n",
    "                if word in stopset:\n",
    "                    continue\n",
    "                self.wordsdict[word] = self.wordsdict.get(word, 0) + 1\n",
    "        # Stable sort keeps insertion order for frequency ties, matching\n",
    "        # the original behaviour.\n",
    "        ranked = sorted(self.wordsdict, key=self.wordsdict.get, reverse=True)\n",
    "        for num, word in enumerate(ranked, start=1):\n",
    "            self.numdict[word] = num\n",
    "        print('Complete!')\n",
    "\n",
    "    def transform(self, data, padding=False):\n",
    "        '''\n",
    "        Encode sentences as id sequences using the fitted vocabulary.\n",
    "        Out-of-vocabulary words map to 0.  With padding=True every row\n",
    "        is right-padded with 0 to the longest sentence's length.\n",
    "        Accepts a single string or a list of strings; returns np.array.\n",
    "        '''\n",
    "        if data is None:\n",
    "            print('No data')\n",
    "            return np.array([])\n",
    "        if not isinstance(data, list):\n",
    "            data = [data]\n",
    "        self.__check()\n",
    "        rows = [[self.numdict.get(word, 0) for word in self.tokenizer(line)]\n",
    "                for line in data]\n",
    "        if padding:\n",
    "            width = max((len(row) for row in rows), default=0)\n",
    "            rows = [row + [0] * (width - len(row)) for row in rows]\n",
    "        return np.array(rows)\n",
    "\n",
    "    def __check(self):\n",
    "        '''Fill in defaults: space tokenizer and empty stopword list.'''\n",
    "        if self.tokenizer is None:\n",
    "            self.tokenizer = lambda x: x.split(' ')\n",
    "        if self.stopwords is None:\n",
    "            self.stopwords = []"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 语料准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the labelled Douban review corpus (one 'label,sentence' per line);\n",
    "# 'with' guarantees the file handle is closed (the original never closed it).\n",
    "with open('./data/DoubanZH.txt', 'r', encoding='utf-8') as fin:\n",
    "    content = [line.strip() for line in fin]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "label = []\n",
    "sentence = []\n",
    "for line in content:\n",
    "    # Split on the FIRST comma only, so review text that itself contains\n",
    "    # commas does not break the 2-way unpack (the original raised\n",
    "    # ValueError on such lines).\n",
    "    l, s = line.split(',', 1)\n",
    "    # NOTE(review): rating '10' maps to class 0, everything else to 1 --\n",
    "    # confirm this matches the intended positive/negative convention.\n",
    "    label.append(0 if l == '10' else 1)\n",
    "    sentence.append(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pair sentences with their labels in a two-column DataFrame.\n",
    "df = pd.DataFrame(data=list(zip(sentence, label)), columns=['sentence', 'label'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Materialize the sentence column as a plain Python list.\n",
    "document = list(df['sentence'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "# 定义一个取样本batch的对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "class batcher:\n",
    "    '''\n",
    "    Uniform random mini-batch sampler: indices are distinct within a\n",
    "    batch, but successive batches may overlap.\n",
    "    '''\n",
    "    def __init__(self, seed=17):\n",
    "        '''Seed numpy's GLOBAL RNG so batch sequences are reproducible.'''\n",
    "        np.random.seed(seed)\n",
    "\n",
    "    def __randint(self, maxlen, batch_size):\n",
    "        '''Draw batch_size distinct indices from [0, maxlen).'''\n",
    "        if batch_size > maxlen:\n",
    "            # Without this guard the rejection loop below can never\n",
    "            # collect enough distinct indices and spins forever.\n",
    "            raise ValueError('batch_size (%d) exceeds dataset size (%d)'\n",
    "                             % (batch_size, maxlen))\n",
    "        number = []\n",
    "        while len(number) < batch_size:\n",
    "            rd = np.random.randint(0, maxlen)\n",
    "            if rd not in number:\n",
    "                number.append(rd)\n",
    "        return number\n",
    "\n",
    "    def get_batch(self, data, label, batch_size):\n",
    "        '''\n",
    "        Return (features, labels) for one random batch of batch_size.\n",
    "        Both inputs go through np.asarray, so plain Python lists work\n",
    "        as well as ndarrays (the original crashed on list features).\n",
    "        '''\n",
    "        feature = np.asarray(data)\n",
    "        mark = np.asarray(label)\n",
    "        randint = self.__randint(len(label), batch_size)\n",
    "        return feature[randint], mark[randint]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): dead cell -- an earlier variant of `batcher` that tracked\n",
    "# already-served indices (stop_set) so a pass over the data has no repeats.\n",
    "# Superseded by the live `batcher` class; consider deleting this cell.\n",
    "# class batcher:\n",
    "#     '''\n",
    "#     分batch\n",
    "#     输入数据 根据容量上限进行batch划分\n",
    "#     需要指定batch_size\n",
    "#     '''\n",
    "#     def __init__(self,seed=17):\n",
    "#         '''\n",
    "#         初始化 设定随机数种子\n",
    "#         以及取用数据时的集合\n",
    "#         '''\n",
    "#         self.stop_set = []\n",
    "#         np.random.seed(seed)\n",
    "        \n",
    "#     def __randint(self,maxlen,batch_size):\n",
    "#         '''\n",
    "#         输出batch_size的随机数\n",
    "#         '''\n",
    "#         if len(self.stop_set) == maxlen:\n",
    "#             self.stop_set = []\n",
    "#         number = []\n",
    "#         while len(number) < batch_size:\n",
    "#             rd = np.random.randint(0,maxlen)\n",
    "#             if not rd in number and not rd in self.stop_set:\n",
    "#                 number.append(rd)\n",
    "#         self.stop_set += number\n",
    "#         return number\n",
    "    \n",
    "#     def get_batch(self,data,label,batch_size):\n",
    "#         '''\n",
    "#         输出给定的数据的batch_size大小的对象\n",
    "#         '''\n",
    "#         feature = data\n",
    "#         mark = np.array(label)\n",
    "#         maxlen = len(label)\n",
    "#         randint = self.__randint(maxlen,batch_size,)\n",
    "#         return feature[randint],mark[randint]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Samplers for the training and validation splits.\n",
    "# NOTE(review): batcher.__init__ seeds numpy's GLOBAL RNG, so the second\n",
    "# construction re-seeds it -- the two samplers do not have independent\n",
    "# random streams.\n",
    "bcher_train = batcher()\n",
    "bcher_valid = batcher()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
