{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Created 2018-05-20\n",
    "\n",
    "@author: Shiyipaisizuo\n",
    "\"\"\"\n",
    "\n",
    "from numpy import *\n",
    "import re\n",
    "import operator\n",
    "\n",
    "\n",
    "# Load the toy data set.\n",
    "\n",
    "def load_data_set():\n",
    "    # Six tokenized documents (posts) used as the training corpus.\n",
    "    posting_list = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n",
    "                    ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n",
    "                    ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n",
    "                    ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n",
    "                    ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n",
    "                    ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n",
    "\n",
    "    # Class labels aligned with posting_list: 1 = abusive text, 0 = normal text.\n",
    "    # (doc 0 is clean, doc 1 is abusive, and so on, alternating.)\n",
    "    class_vec = [0, 1, 0, 1, 0, 1]\n",
    "\n",
    "    return posting_list, class_vec\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the vocabulary: a de-duplicated list of every word appearing in any document.\n",
    "def create_vocab_list(data_set):\n",
    "\n",
    "    # Start from an empty set.\n",
    "    vocab_set = set([])\n",
    "\n",
    "    for document in data_set:\n",
    "        vocab_set = vocab_set | set(document)\n",
    "    # Return the union of all document word sets as a list.\n",
    "    return list(vocab_set)\n",
    "\n",
    "\n",
    "# Convert one document into a word-count vector over the vocabulary.\n",
    "# vocab_list: list of all known words; input_set: a tokenized sentence, e.g.\n",
    "# ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'].\n",
    "# NOTE(review): despite the 'set_of' name this uses += 1, i.e. it counts\n",
    "# occurrences (bag-of-words) rather than recording presence only.\n",
    "def set_of_word_vec(vocab_list, input_set):\n",
    "\n",
    "    # Vector of zeros, one slot per vocabulary word.\n",
    "    vec = [0] * len(vocab_list)\n",
    "    # For each input word that exists in the vocabulary, increment its slot.\n",
    "    for word in input_set:\n",
    "        if word in vocab_list:\n",
    "            vec[vocab_list.index(word)] += 1\n",
    "    # Return the per-word count vector.\n",
    "    return vec\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'myvocablist' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-4-f20cf8832a4d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mset_of_word_vec\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmyvocablist\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'my'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'dog'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'has'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'flea'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'problems'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'help'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'please'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m: name 'myvocablist' is not defined"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this cell was run before myvocablist was defined (see the\n",
    "# NameError in its stored output); under Restart-&-Run-All it must come after\n",
    "# the cell below that builds myvocablist.\n",
    "print(set_of_word_vec(myvocablist,['my', 'dog', 'has', 'flea', 'problems', 'help', 'please']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1, 32)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the corpus and build its vocabulary; mat(...).shape shows the\n",
    "# vocabulary size (1, 32) per the stored output.\n",
    "listoposts,listclasses=load_data_set()\n",
    "myvocablist=create_vocab_list(listoposts)\n",
    "mat(myvocablist).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the naive Bayes classifier.\n",
    "# train_matrix: one word-count vector per document (in the tests below this is\n",
    "#               a 6 x 32 matrix, see the next cell).\n",
    "# train_category: label per document, e.g. [0, 1, 0, 1, 0, 1] (1 = abusive).\n",
    "def train(train_matrix, train_category):\n",
    "#     print(train_matrix)\n",
    "#     print('``````````````')\n",
    "#     print(train_category) \n",
    "#     print('```````````')\n",
    "    # Number of training documents.\n",
    "    num_train_docs = len(train_matrix)\n",
    "    # Vocabulary size. NOTE(review): this loop just keeps the length of the\n",
    "    # last non-None row; since every row has the same length it equals\n",
    "    # len(train_matrix[0]).\n",
    "    num_words=0\n",
    "    for i in range(num_train_docs):\n",
    "        if train_matrix[i] is not None:\n",
    "            num_words = len(train_matrix[i])\n",
    "    print('numwords:',num_words)\n",
    "    # p_abusive: prior probability that a document is abusive (class 1).\n",
    "    p_abusive = sum(train_category) / float(num_train_docs)\n",
    "#     print('num1:',num_train_docs)\n",
    "\n",
    "    # Laplace smoothing: per-word counts start at 1 (vectors of ones) ...\n",
    "    p0_num = ones(num_words)\n",
    "    p1_num = ones(num_words)# ones(), not zeros(): counts are smoothed, initialized to 1\n",
    "    p0_denom = 2.0\n",
    "    p1_denom = 2.0# ... and the denominators start at 2.0, not 0 (smoothing)\n",
    "    # Accumulate counts by class:\n",
    "    # if a document's label is 1, add its count vector into p1_num (elementwise,\n",
    "    # e.g. [1 1 1 ...] + [0 1 0 1 ...]) and its total word count into p1_denom;\n",
    "    # otherwise accumulate into the class-0 totals.\n",
    "    for i in range(num_train_docs):\n",
    "        if train_category[i] == 1:# gather data for P(w0|1), P(w1|1), P(w2|1), ...\n",
    "            p1_num += train_matrix[i]\n",
    "            p1_denom += sum(train_matrix[i])\n",
    "#             print('p1:',p1_num)\n",
    "#             print('p1_denom',p1_denom)\n",
    "        else:\n",
    "            p0_num += train_matrix[i]\n",
    "            p0_denom += sum(train_matrix[i])\n",
    "\n",
    "    # Elementwise division, then log (avoids underflow when the small\n",
    "    # per-word probabilities are summed/multiplied later).\n",
    "    print(p0_num)\n",
    "    # Example magnitudes: p1_num may look like [2 5 1 0 6 ...], p1_denom like 40.\n",
    "    p1_vec = log(p1_num / p1_denom)\n",
    "    p0_vec = log(p0_num / p0_denom)\n",
    "    # Returns two log-probability vectors (e.g. [-3.1354... -3.1354... ...])\n",
    "    # and the abusive-class prior probability.\n",
    "    return p0_vec, p1_vec, p_abusive"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]\n"
     ]
    }
   ],
   "source": [
    "# Build the 6 x 32 training matrix: one count vector per post.\n",
    "trainMat=[]\n",
    "for postindoc in listoposts:\n",
    "    trainMat.append(set_of_word_vec(myvocablist,postindoc))\n",
    "print((trainMat))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "numwords: 32\n",
      "[1. 2. 2. 2. 2. 2. 1. 2. 2. 1. 1. 2. 3. 1. 2. 1. 1. 2. 2. 2. 2. 2. 2. 2.\n",
      " 2. 2. 1. 1. 2. 1. 1. 4.]\n",
      "[-3.25809654 -2.56494936 -2.56494936 -2.56494936 -2.56494936 -2.56494936\n",
      " -3.25809654 -2.56494936 -2.56494936 -3.25809654 -3.25809654 -2.56494936\n",
      " -2.15948425 -3.25809654 -2.56494936 -3.25809654 -3.25809654 -2.56494936\n",
      " -2.56494936 -2.56494936 -2.56494936 -2.56494936 -2.56494936 -2.56494936\n",
      " -2.56494936 -2.56494936 -3.25809654 -3.25809654 -2.56494936 -3.25809654\n",
      " -3.25809654 -1.87180218] [-2.35137526 -2.35137526 -3.04452244 -3.04452244 -3.04452244 -3.04452244\n",
      " -2.35137526 -3.04452244 -3.04452244 -2.35137526 -1.65822808 -3.04452244\n",
      " -2.35137526 -1.94591015 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n",
      " -3.04452244 -1.94591015 -2.35137526 -3.04452244 -3.04452244 -3.04452244\n",
      " -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -2.35137526\n",
      " -2.35137526 -3.04452244] 0.5\n"
     ]
    }
   ],
   "source": [
    "# Train on the toy corpus; prints the two log-probability vectors and the prior (0.5).\n",
    "p0v,p1v,pAb=train(trainMat,listclasses)\n",
    "print(p0v,p1v,pAb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Naive Bayes classification.\n",
    "# vec_classify: count vector for the sentence to classify, e.g. [1 0 1 0 0 ...].\n",
    "# p0_vec, p1_vec, p_class: the three values returned by train().\n",
    "def classify(vec_classify, p0_vec, p1_vec, p_class):\n",
    "    # Log-space score per class: sum of per-word log likelihoods plus log prior.\n",
    "    p1 = sum(vec_classify * p1_vec) + log(p_class)\n",
    "    p0 = sum(vec_classify * p0_vec) + log(1.0 - p_class)\n",
    "#     print(p1)\n",
    "#     print(p0)\n",
    "    # Return the class with the higher posterior score.\n",
    "    if p1 > p0:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bag-of-words model for naive Bayes; same as set_of_word_vec(vocab_list, input_set).\n",
    "# NOTE(review): the body is identical to set_of_word_vec — the two could be merged.\n",
    "def bag_of_word_vec(vocab_list, inpiut_set):\n",
    "    # One count slot per vocabulary word; count occurrences of each known input word.\n",
    "    vec = [0] * len(vocab_list)\n",
    "    for word in inpiut_set:\n",
    "        if word in vocab_list:\n",
    "            vec[vocab_list.index(word)] += 1\n",
    "    return vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# End-to-end test of the naive Bayes classifier on the toy corpus.\n",
    "def test():\n",
    "    list_of_posts, list_classes = load_data_set()\n",
    "    my_vocal_list = create_vocab_list(list_of_posts)\n",
    "# train_matrix below is a 6 x 32 count matrix (one row per post)\n",
    "    train_matrix = []\n",
    "    for post_in_doc in list_of_posts:\n",
    "        train_matrix.append(set_of_word_vec(my_vocal_list, post_in_doc))\n",
    "# train the classifier\n",
    "    p0_vec, p1_vec, p_abusive = train(array(train_matrix), array(list_classes))\n",
    "#     print('QQQQQQQ')\n",
    "#     print(p0_vec, p1_vec, p_abusive)\n",
    "    test_entry = ['love', 'my', 'dalmation']\n",
    "    this_doc = array(set_of_word_vec(my_vocal_list, test_entry))\n",
    "#     print('QQQQ',this_doc)\n",
    "    print(test_entry, 'classified as:', classify(this_doc,p0_vec, p1_vec, p_abusive))\n",
    "\n",
    "    test_entry = ['stupid', 'garbage']\n",
    "    this_doc = array(set_of_word_vec(my_vocal_list, test_entry))\n",
    "    print(test_entry, 'classified as: ', classify(this_doc, p0_vec, p1_vec, p_abusive))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "numwords: 32\n",
      "[1. 2. 2. 2. 2. 2. 1. 2. 2. 1. 1. 2. 3. 1. 2. 1. 1. 2. 2. 2. 2. 2. 2. 2.\n",
      " 2. 2. 1. 1. 2. 1. 1. 4.]\n",
      "['love', 'my', 'dalmation'] classified as: 0\n",
      "['stupid', 'garbage'] classified as:  1\n"
     ]
    }
   ],
   "source": [
    "# Run the end-to-end test; per the stored output, 'love my dalmation' -> 0\n",
    "# and 'stupid garbage' -> 1.\n",
    "test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['this',\n",
       " 'book',\n",
       " 'the',\n",
       " 'best',\n",
       " 'book',\n",
       " 'python',\n",
       " 'm_l',\n",
       " 'have',\n",
       " 'ever',\n",
       " 'laid',\n",
       " 'eyes',\n",
       " 'upon']"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Tokenize a raw text string for classification:\n",
    "# split on runs of non-word characters, drop tokens of length <= 2\n",
    "# (filters 'is', 'on', punctuation residue), lower-case the rest.\n",
    "def text_parse(big_string):\n",
    "    # Raw-string pattern: a plain '\\W' is an invalid escape sequence and warns\n",
    "    # on modern Python; r'\\W+' behaves identically.\n",
    "    list_of_tokens = re.split(r'\\W+', big_string)\n",
    "\n",
    "    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]\n",
    "mySent='This book is the best book on Python or M_L. I !!!!have ever@@@ laid eyes upon.'\n",
    "text_parse(mySent)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Practical part starts here; the material above is the important bit!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "numwords: 694\n",
      "[ 1.  2.  3.  3.  1.  2.  4.  1.  3.  2.  2.  3.  2.  2.  2.  1.  2.  1.\n",
      "  4.  1.  8.  2.  1.  1.  2.  1.  2.  1.  1.  1.  1.  1.  2.  3.  3.  2.\n",
      "  1.  2.  2.  5.  2.  2.  2.  1.  1.  3.  1.  3.  2.  1.  1.  2.  1.  3.\n",
      "  2.  2.  3.  2.  1.  2.  2.  2.  2.  2.  1.  2.  2.  2.  1.  1.  4.  1.\n",
      "  2.  1.  2.  2.  1.  3.  6.  1.  1.  1. 13.  1.  1.  1.  2.  2.  2.  1.\n",
      "  2.  2.  2.  3.  2.  3.  1.  3. 42.  2.  1.  4.  2.  3.  1.  3.  1.  2.\n",
      "  4.  2.  2.  2.  2.  2.  3.  2.  2.  1.  1.  1.  2.  7.  2.  1.  1.  1.\n",
      "  3.  4.  2.  3.  1.  1. 14.  2.  2.  2.  3.  2.  2.  2.  2.  4.  2.  2.\n",
      "  4.  1.  2.  2.  2.  2.  3.  2.  1.  2.  1.  2.  1.  4.  2.  2.  1.  6.\n",
      "  2.  3.  2.  4.  3.  3.  3.  1.  2. 11.  1.  3.  1.  3.  2.  1.  4.  7.\n",
      "  2.  1.  1.  2.  5.  1.  1.  1.  4.  2.  2.  2.  2.  1.  3.  1.  3.  2.\n",
      "  1.  2.  2.  1.  1. 11.  2.  1.  3.  1.  1.  2.  2.  2.  2.  1.  5.  1.\n",
      " 10.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.  4.  1.  2. 10.  2.  2.  4.\n",
      "  1.  2.  2.  2.  2.  2.  1.  2.  3.  2.  2.  2.  2.  1.  2.  1.  1.  1.\n",
      "  2.  2.  2.  1.  2.  4.  1.  2.  3.  3.  1.  1.  2.  1.  2.  2.  1.  2.\n",
      "  1.  2.  3.  1.  2.  2.  2.  2.  1.  2.  1.  2.  2.  1.  1.  2.  3.  1.\n",
      "  2.  1.  2.  1.  4.  3.  4.  1.  1.  2.  4.  1.  2.  2.  4.  1.  2.  2.\n",
      "  1.  1.  1.  2.  3.  2.  2.  1.  1.  2.  2.  3.  2.  2.  2.  1.  2.  2.\n",
      "  1.  1.  1.  2.  2.  6.  4.  3.  2.  2.  1.  3.  1.  1.  2.  2.  1.  2.\n",
      "  1.  2.  5.  2.  2.  2.  2.  1.  1.  2.  3.  2.  1.  3.  1.  1.  2.  2.\n",
      "  2.  1.  1.  2.  2.  1.  1.  2.  2.  1.  2.  2.  2.  3.  1.  1.  2.  1.\n",
      "  2.  6. 32.  1.  1.  2.  1.  2.  2.  7.  1.  2.  2.  1.  1.  2.  2.  2.\n",
      "  1.  2.  5.  2.  1.  2.  1.  2.  1.  1.  2.  3.  2.  3.  2.  1.  2.  1.\n",
      "  1.  2.  2.  2.  2.  2.  2.  2.  2.  1.  4.  2. 11.  1.  3.  1.  2.  1.\n",
      "  2.  2.  2.  1.  1.  1.  1.  2.  1.  2.  2.  2.  1.  3.  6.  1.  2.  1.\n",
      "  2.  2.  1.  1.  2.  5.  1.  1.  5.  4.  2.  1.  2.  4.  1.  3.  1.  4.\n",
      "  2.  1.  3.  3.  1.  2. 12.  2.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.\n",
      "  2.  2.  1.  1.  2.  5.  2.  2.  2.  2.  1.  1.  2.  2.  1.  2.  2.  3.\n",
      "  2.  1.  1.  2.  5.  2.  3.  2.  1.  1.  5.  2.  1.  5.  2.  1.  2.  3.\n",
      "  1.  2.  2.  2.  2.  1.  2.  1.  2.  3.  9.  2.  1.  2.  2.  2.  2.  2.\n",
      "  1.  1.  1.  1.  1.  5.  5.  1.  2.  2.  1.  2.  2.  2.  1.  1.  1.  6.\n",
      "  1.  2.  2.  1.  7.  2.  1.  4.  1.  2. 12.  2.  2.  3.  1.  5.  1.  2.\n",
      "  2.  1.  1.  1.  1.  2.  2.  2.  1.  2.  1.  1.  1.  2.  1.  2.  2.  3.\n",
      "  2.  2.  3.  3.  2.  1.  2.  2.  1.  2.  2.  2.  2.  2.  2.  1.  2. 26.\n",
      "  2.  1.  1.  1.  2.  2.  1.  1.  1.  1.  1.  1.  2.  2.  4.  2.  2.  1.\n",
      "  4.  1.  3.  3.  1.  2.  8.  1.  2.  3.  1.  1.  4.  1.  2.  3.  2.  2.\n",
      "  3.  2.  1.  2.  2.  2.  3.  7.  1.  1.  3.  1.  2.  4.  1.  1.  2.  3.\n",
      "  2.  1.  2.  2.  1.  1.  2.  2.  2.  2.  1.  1.  1.  3.  2.  1.  2.  1.\n",
      "  2.  3.  1.  2.  1.  1.  1.  2.  2.  5.]\n",
      "classification error ['ordercializviagra', 'online', 'save', '0nline', 'pharmacy', 'noprescription', 'required', 'buy', 'canadian', 'drugs', 'wholesale', 'prices', 'and', 'save', 'fda', 'approved', 'drugs', 'superb', 'quality', 'drugs', 'only', 'accept', 'all', 'major', 'credit', 'cards']\n",
      "classification error ['arvind', 'thirumalai', 'commented', 'your', 'status', 'arvind', 'wrote', 'you', 'know', 'reply', 'this', 'email', 'comment', 'this', 'status']\n",
      "classification error ['yeah', 'ready', 'may', 'not', 'here', 'because', 'jar', 'jar', 'has', 'plane', 'tickets', 'germany', 'for']\n",
      "classification error ['what', 'going', 'there', 'talked', 'john', 'email', 'talked', 'about', 'some', 'computer', 'stuff', 'that', 'went', 'bike', 'riding', 'the', 'rain', 'was', 'not', 'that', 'cold', 'went', 'the', 'museum', 'yesterday', 'was', 'get', 'and', 'they', 'had', 'free', 'food', 'the', 'same', 'time', 'was', 'giants', 'game', 'when', 'got', 'done', 'had', 'take', 'the', 'train', 'with', 'all', 'the', 'giants', 'fans', 'they', 'are', 'drunk']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['experience', 'with', 'biggerpenis', 'today', 'grow', 'inches', 'more', 'the', 'safest', 'most', 'effective', 'methods', 'of_penisen1argement', 'save', 'your', 'time', 'and', 'money', 'bettererections', 'with', 'effective', 'ma1eenhancement', 'products', 'ma1eenhancement', 'supplement', 'trusted', 'millions', 'buy', 'today']\n",
      "classification error ['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']\n",
      "classification error ['thanks', 'peter', 'definitely', 'check', 'this', 'how', 'your', 'book', 'going', 'heard', 'chapter', 'came', 'and', 'was', 'good', 'shape', 'hope', 'you', 'are', 'doing', 'well', 'cheers', 'troy']\n",
      "classification error ['codeine', 'the', 'most', 'competitive', 'price', 'net', 'codeine', 'wilson', '30mg', '156', 'codeine', 'wilson', '30mg', '291', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '396', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '120', '492', 'freeviagra', 'pills']\n",
      "the error rate is:  0.2\n",
      "numwords: 694\n",
      "[ 1.  1.  3.  1.  1.  2.  4.  1.  3.  1.  2.  3.  2.  2.  2.  1.  2.  1.\n",
      "  3.  1.  8.  2.  1.  1.  2.  1.  2.  1.  1.  1.  1.  1.  2.  2.  3.  2.\n",
      "  1.  1.  2.  4.  2.  2.  2.  1.  1.  2.  1.  3.  2.  1.  1.  2.  1.  2.\n",
      "  2.  2.  3.  2.  1.  2.  2.  2.  2.  2.  1.  2.  2.  2.  1.  1.  3.  1.\n",
      "  1.  1.  2.  2.  1.  1.  5.  1.  1.  1. 13.  1.  1.  1.  2.  2.  1.  1.\n",
      "  2.  2.  2.  3.  2.  3.  1.  3. 35.  2.  1.  4.  1.  1.  1.  3.  1.  2.\n",
      "  4.  2.  2.  2.  1.  2.  2.  2.  2.  1.  1.  1.  2.  7.  2.  1.  1.  1.\n",
      "  2.  4.  2.  2.  1.  1. 12.  2.  1.  2.  3.  2.  2.  2.  2.  3.  2.  2.\n",
      "  3.  1.  2.  2.  2.  2.  1.  2.  1.  1.  1.  2.  1.  4.  2.  2.  1.  6.\n",
      "  2.  3.  2.  4.  3.  2.  3.  1.  2. 11.  1.  3.  1.  3.  2.  1.  4.  6.\n",
      "  2.  1.  1.  1.  5.  1.  1.  1.  4.  2.  2.  2.  2.  1.  3.  1.  3.  1.\n",
      "  1.  1.  2.  1.  1. 10.  2.  1.  3.  1.  1.  2.  1.  2.  2.  1.  5.  1.\n",
      "  8.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.  3.  1.  2.  8.  2.  2.  4.\n",
      "  1.  2.  2.  1.  2.  2.  1.  2.  2.  2.  2.  2.  2.  1.  2.  1.  1.  1.\n",
      "  2.  2.  2.  1.  2.  2.  1.  2.  2.  3.  1.  1.  1.  1.  2.  2.  1.  2.\n",
      "  1.  2.  3.  1.  2.  2.  2.  2.  1.  2.  1.  2.  2.  1.  1.  2.  3.  1.\n",
      "  2.  1.  1.  1.  1.  3.  4.  1.  1.  2.  4.  1.  2.  2.  3.  1.  2.  2.\n",
      "  1.  1.  1.  2.  3.  1.  2.  1.  1.  2.  2.  2.  2.  2.  2.  1.  1.  2.\n",
      "  1.  1.  1.  2.  2.  6.  3.  3.  2.  2.  1.  3.  1.  1.  2.  2.  1.  2.\n",
      "  1.  2.  5.  2.  2.  2.  1.  1.  1.  2.  3.  2.  1.  3.  1.  1.  2.  2.\n",
      "  1.  1.  1.  2.  2.  1.  1.  2.  2.  1.  1.  2.  2.  3.  1.  1.  2.  1.\n",
      "  2.  6. 27.  1.  1.  2.  1.  1.  2.  6.  1.  1.  1.  1.  1.  2.  1.  2.\n",
      "  1.  2.  5.  2.  1.  2.  1.  1.  1.  1.  2.  3.  1.  3.  2.  1.  2.  1.\n",
      "  1.  2.  2.  2.  2.  2.  2.  2.  2.  1.  3.  2.  9.  1.  3.  1.  2.  1.\n",
      "  2.  2.  2.  1.  1.  1.  1.  2.  1.  2.  2.  2.  1.  3.  5.  1.  2.  1.\n",
      "  2.  2.  1.  1.  2.  1.  1.  1.  5.  4.  2.  1.  2.  2.  1.  3.  1.  4.\n",
      "  2.  1.  2.  3.  1.  2.  8.  2.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.\n",
      "  2.  1.  1.  1.  2.  4.  2.  2.  2.  2.  1.  1.  2.  2.  1.  1.  2.  3.\n",
      "  1.  1.  1.  2.  5.  2.  3.  2.  1.  1.  5.  2.  1.  5.  2.  1.  2.  3.\n",
      "  1.  2.  2.  2.  2.  1.  2.  1.  2.  2.  8.  2.  1.  2.  2.  2.  2.  2.\n",
      "  1.  1.  1.  1.  1.  5.  4.  1.  2.  2.  1.  2.  2.  2.  1.  1.  1.  6.\n",
      "  1.  1.  2.  1.  6.  2.  1.  4.  1.  2. 10.  2.  1.  3.  1.  5.  1.  2.\n",
      "  2.  1.  1.  1.  1.  2.  2.  2.  1.  1.  1.  1.  1.  2.  1.  2.  2.  3.\n",
      "  2.  1.  3.  3.  2.  1.  2.  2.  1.  2.  2.  1.  1.  2.  2.  1.  2. 23.\n",
      "  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  1.  1.  2.  2.  3.  2.  2.  1.\n",
      "  4.  1.  3.  3.  1.  2.  2.  1.  2.  3.  1.  1.  4.  1.  2.  2.  2.  2.\n",
      "  3.  2.  1.  2.  2.  1.  3.  6.  1.  1.  3.  1.  2.  4.  1.  1.  2.  2.\n",
      "  2.  1.  2.  2.  1.  1.  2.  2.  2.  2.  1.  1.  1.  3.  2.  1.  2.  1.\n",
      "  2.  3.  1.  2.  1.  1.  1.  2.  1.  3.]\n",
      "classification error ['ordercializviagra', 'online', 'save', '0nline', 'pharmacy', 'noprescription', 'required', 'buy', 'canadian', 'drugs', 'wholesale', 'prices', 'and', 'save', 'fda', 'approved', 'drugs', 'superb', 'quality', 'drugs', 'only', 'accept', 'all', 'major', 'credit', 'cards']\n",
      "classification error ['arvind', 'thirumalai', 'commented', 'your', 'status', 'arvind', 'wrote', 'you', 'know', 'reply', 'this', 'email', 'comment', 'this', 'status']\n",
      "classification error ['yeah', 'ready', 'may', 'not', 'here', 'because', 'jar', 'jar', 'has', 'plane', 'tickets', 'germany', 'for']\n",
      "classification error ['what', 'going', 'there', 'talked', 'john', 'email', 'talked', 'about', 'some', 'computer', 'stuff', 'that', 'went', 'bike', 'riding', 'the', 'rain', 'was', 'not', 'that', 'cold', 'went', 'the', 'museum', 'yesterday', 'was', 'get', 'and', 'they', 'had', 'free', 'food', 'the', 'same', 'time', 'was', 'giants', 'game', 'when', 'got', 'done', 'had', 'take', 'the', 'train', 'with', 'all', 'the', 'giants', 'fans', 'they', 'are', 'drunk']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['experience', 'with', 'biggerpenis', 'today', 'grow', 'inches', 'more', 'the', 'safest', 'most', 'effective', 'methods', 'of_penisen1argement', 'save', 'your', 'time', 'and', 'money', 'bettererections', 'with', 'effective', 'ma1eenhancement', 'products', 'ma1eenhancement', 'supplement', 'trusted', 'millions', 'buy', 'today']\n",
      "classification error ['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']\n",
      "classification error ['thanks', 'peter', 'definitely', 'check', 'this', 'how', 'your', 'book', 'going', 'heard', 'chapter', 'came', 'and', 'was', 'good', 'shape', 'hope', 'you', 'are', 'doing', 'well', 'cheers', 'troy']\n",
      "classification error ['codeine', 'the', 'most', 'competitive', 'price', 'net', 'codeine', 'wilson', '30mg', '156', 'codeine', 'wilson', '30mg', '291', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '396', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '120', '492', 'freeviagra', 'pills']\n",
      "classification error ['this', 'mail', 'was', 'sent', 'from', 'notification', 'only', 'address', 'that', 'cannot', 'accept', 'incoming', 'mail', 'please', 'not', 'reply', 'this', 'message', 'thank', 'you', 'for', 'your', 'online', 'reservation', 'the', 'store', 'you', 'selected', 'has', 'located', 'the', 'item', 'you', 'requested', 'and', 'has', 'placed', 'hold', 'your', 'name', 'please', 'note', 'that', 'all', 'items', 'are', 'held', 'for', 'day', 'please', 'note', 'store', 'prices', 'may', 'differ', 'from', 'those', 'online', 'you', 'have', 'questions', 'need', 'assistance', 'with', 'your', 'reservation', 'please', 'contact', 'the', 'store', 'the', 'phone', 'number', 'listed', 'below', 'you', 'can', 'also', 'access', 'store', 'information', 'such', 'store', 'hours', 'and', 'location', 'the', 'web', 'http', 'www', 'borders', 'com', 'online', 'store', 'storedetailview_98']\n",
      "classification error ['buy', 'ambiem', 'zolpidem', '5mg', '10mg', 'pill', 'pills', '129', 'pills', '199', '180', 'pills', '430', 'pills', '138', '120', 'pills', '322']\n",
      "classification error ['codeine', '15mg', 'for', '203', 'visa', 'only', 'codeine', 'methylmorphine', 'narcotic', 'opioid', 'pain', 'reliever', 'have', '15mg', '30mg', 'pills', '15mg', 'for', '203', '15mg', 'for', '385', '15mg', 'for', '562', 'visa', 'only']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "classification error ['peter', 'these', 'are', 'the', 'only', 'good', 'scenic', 'ones', 'and', 'too', 'bad', 'there', 'was', 'girl', 'back', 'one', 'them', 'just', 'try', 'enjoy', 'the', 'blue', 'sky']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['ryan', 'whybrew', 'commented', 'your', 'status', 'ryan', 'wrote', 'turd', 'ferguson', 'butt', 'horn']\n",
      "classification error ['oem', 'adobe', 'microsoft', 'softwares', 'fast', 'order', 'and', 'download', 'microsoft', 'office', 'professional', 'plus', '2007', '2010', '129', 'microsoft', 'windows', 'ultimate', '119', 'adobe', 'photoshop', 'cs5', 'extended', 'adobe', 'acrobat', 'pro', 'extended', 'windows', 'professional', 'thousand', 'more', 'titles']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "the error rate is:  0.1\n",
      "numwords: 694\n",
      "[ 1.  1.  2.  1.  1.  2.  3.  1.  3.  1.  2.  2.  2.  2.  1.  1.  2.  1.\n",
      "  2.  1.  8.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  2.  2.  3.  2.\n",
      "  1.  1.  2.  3.  1.  2.  2.  1.  1.  2.  1.  3.  2.  1.  1.  2.  1.  2.\n",
      "  2.  2.  3.  1.  1.  2.  2.  2.  1.  2.  1.  1.  2.  1.  1.  1.  3.  1.\n",
      "  1.  1.  1.  2.  1.  1.  4.  1.  1.  1. 13.  1.  1.  1.  2.  2.  1.  1.\n",
      "  1.  1.  2.  3.  2.  3.  1.  2. 30.  2.  1.  4.  1.  1.  1.  3.  1.  2.\n",
      "  1.  2.  2.  2.  1.  2.  2.  2.  2.  1.  1.  1.  2.  7.  1.  1.  1.  1.\n",
      "  2.  4.  2.  2.  1.  1.  9.  2.  1.  1.  1.  2.  1.  2.  2.  3.  1.  2.\n",
      "  3.  1.  2.  1.  1.  2.  1.  2.  1.  1.  1.  2.  1.  3.  1.  1.  1.  6.\n",
      "  1.  1.  2.  4.  3.  2.  3.  1.  1. 11.  1.  1.  1.  3.  2.  1.  4.  6.\n",
      "  2.  1.  1.  1.  4.  1.  1.  1.  1.  2.  2.  2.  2.  1.  3.  1.  3.  1.\n",
      "  1.  1.  2.  1.  1.  9.  1.  1.  2.  1.  1.  1.  1.  2.  2.  1.  4.  1.\n",
      "  7.  1.  2.  1.  1.  1.  1.  1.  2.  1.  1.  3.  1.  2.  7.  2.  2.  4.\n",
      "  1.  2.  1.  1.  2.  2.  1.  2.  2.  2.  2.  2.  2.  1.  2.  1.  1.  1.\n",
      "  2.  2.  2.  1.  2.  2.  1.  2.  2.  1.  1.  1.  1.  1.  1.  2.  1.  1.\n",
      "  1.  2.  2.  1.  1.  1.  1.  1.  1.  2.  1.  2.  2.  1.  1.  2.  3.  1.\n",
      "  2.  1.  1.  1.  1.  3.  3.  1.  1.  2.  4.  1.  2.  2.  3.  1.  2.  2.\n",
      "  1.  1.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.  1.  2.  2.  1.  1.  1.\n",
      "  1.  1.  1.  2.  2.  6.  3.  2.  2.  2.  1.  1.  1.  1.  2.  2.  1.  1.\n",
      "  1.  2.  5.  1.  2.  2.  1.  1.  1.  2.  3.  1.  1.  3.  1.  1.  2.  2.\n",
      "  1.  1.  1.  1.  1.  1.  1.  1.  2.  1.  1.  1.  2.  2.  1.  1.  1.  1.\n",
      "  2.  6. 23.  1.  1.  2.  1.  1.  2.  3.  1.  1.  1.  1.  1.  2.  1.  2.\n",
      "  1.  2.  4.  2.  1.  1.  1.  1.  1.  1.  2.  3.  1.  3.  1.  1.  2.  1.\n",
      "  1.  1.  2.  1.  2.  2.  2.  1.  2.  1.  3.  2.  9.  1.  3.  1.  1.  1.\n",
      "  1.  2.  2.  1.  1.  1.  1.  1.  1.  2.  2.  1.  1.  3.  4.  1.  1.  1.\n",
      "  1.  2.  1.  1.  2.  1.  1.  1.  5.  4.  2.  1.  1.  2.  1.  3.  1.  3.\n",
      "  2.  1.  1.  2.  1.  2.  7.  1.  1.  2.  1.  1.  1.  1.  1.  2.  1.  2.\n",
      "  2.  1.  1.  1.  2.  3.  2.  2.  1.  1.  1.  1.  1.  2.  1.  1.  2.  3.\n",
      "  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  5.  2.  1.  5.  1.  1.  2.  2.\n",
      "  1.  2.  2.  2.  2.  1.  2.  1.  2.  2.  7.  2.  1.  1.  1.  2.  2.  2.\n",
      "  1.  1.  1.  1.  1.  1.  4.  1.  2.  2.  1.  1.  1.  2.  1.  1.  1.  6.\n",
      "  1.  1.  2.  1.  6.  2.  1.  4.  1.  2.  5.  2.  1.  3.  1.  1.  1.  1.\n",
      "  2.  1.  1.  1.  1.  2.  2.  2.  1.  1.  1.  1.  1.  2.  1.  2.  2.  3.\n",
      "  1.  1.  2.  3.  2.  1.  2.  2.  1.  2.  2.  1.  1.  2.  2.  1.  1. 18.\n",
      "  1.  1.  1.  1.  1.  2.  1.  1.  1.  1.  1.  1.  2.  2.  2.  2.  2.  1.\n",
      "  1.  1.  3.  3.  1.  1.  2.  1.  2.  2.  1.  1.  2.  1.  2.  1.  2.  2.\n",
      "  3.  1.  1.  1.  2.  1.  1.  5.  1.  1.  3.  1.  1.  1.  1.  1.  1.  2.\n",
      "  2.  1.  2.  2.  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  2.  1.  2.  1.\n",
      "  2.  3.  1.  1.  1.  1.  1.  1.  1.  2.]\n",
      "classification error ['ordercializviagra', 'online', 'save', '0nline', 'pharmacy', 'noprescription', 'required', 'buy', 'canadian', 'drugs', 'wholesale', 'prices', 'and', 'save', 'fda', 'approved', 'drugs', 'superb', 'quality', 'drugs', 'only', 'accept', 'all', 'major', 'credit', 'cards']\n",
      "classification error ['arvind', 'thirumalai', 'commented', 'your', 'status', 'arvind', 'wrote', 'you', 'know', 'reply', 'this', 'email', 'comment', 'this', 'status']\n",
      "classification error ['yeah', 'ready', 'may', 'not', 'here', 'because', 'jar', 'jar', 'has', 'plane', 'tickets', 'germany', 'for']\n",
      "classification error ['what', 'going', 'there', 'talked', 'john', 'email', 'talked', 'about', 'some', 'computer', 'stuff', 'that', 'went', 'bike', 'riding', 'the', 'rain', 'was', 'not', 'that', 'cold', 'went', 'the', 'museum', 'yesterday', 'was', 'get', 'and', 'they', 'had', 'free', 'food', 'the', 'same', 'time', 'was', 'giants', 'game', 'when', 'got', 'done', 'had', 'take', 'the', 'train', 'with', 'all', 'the', 'giants', 'fans', 'they', 'are', 'drunk']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['experience', 'with', 'biggerpenis', 'today', 'grow', 'inches', 'more', 'the', 'safest', 'most', 'effective', 'methods', 'of_penisen1argement', 'save', 'your', 'time', 'and', 'money', 'bettererections', 'with', 'effective', 'ma1eenhancement', 'products', 'ma1eenhancement', 'supplement', 'trusted', 'millions', 'buy', 'today']\n",
      "classification error ['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']\n",
      "classification error ['thanks', 'peter', 'definitely', 'check', 'this', 'how', 'your', 'book', 'going', 'heard', 'chapter', 'came', 'and', 'was', 'good', 'shape', 'hope', 'you', 'are', 'doing', 'well', 'cheers', 'troy']\n",
      "classification error ['codeine', 'the', 'most', 'competitive', 'price', 'net', 'codeine', 'wilson', '30mg', '156', 'codeine', 'wilson', '30mg', '291', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '396', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '120', '492', 'freeviagra', 'pills']\n",
      "classification error ['this', 'mail', 'was', 'sent', 'from', 'notification', 'only', 'address', 'that', 'cannot', 'accept', 'incoming', 'mail', 'please', 'not', 'reply', 'this', 'message', 'thank', 'you', 'for', 'your', 'online', 'reservation', 'the', 'store', 'you', 'selected', 'has', 'located', 'the', 'item', 'you', 'requested', 'and', 'has', 'placed', 'hold', 'your', 'name', 'please', 'note', 'that', 'all', 'items', 'are', 'held', 'for', 'day', 'please', 'note', 'store', 'prices', 'may', 'differ', 'from', 'those', 'online', 'you', 'have', 'questions', 'need', 'assistance', 'with', 'your', 'reservation', 'please', 'contact', 'the', 'store', 'the', 'phone', 'number', 'listed', 'below', 'you', 'can', 'also', 'access', 'store', 'information', 'such', 'store', 'hours', 'and', 'location', 'the', 'web', 'http', 'www', 'borders', 'com', 'online', 'store', 'storedetailview_98']\n",
      "classification error ['buy', 'ambiem', 'zolpidem', '5mg', '10mg', 'pill', 'pills', '129', 'pills', '199', '180', 'pills', '430', 'pills', '138', '120', 'pills', '322']\n",
      "classification error ['codeine', '15mg', 'for', '203', 'visa', 'only', 'codeine', 'methylmorphine', 'narcotic', 'opioid', 'pain', 'reliever', 'have', '15mg', '30mg', 'pills', '15mg', 'for', '203', '15mg', 'for', '385', '15mg', 'for', '562', 'visa', 'only']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "classification error ['peter', 'these', 'are', 'the', 'only', 'good', 'scenic', 'ones', 'and', 'too', 'bad', 'there', 'was', 'girl', 'back', 'one', 'them', 'just', 'try', 'enjoy', 'the', 'blue', 'sky']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['ryan', 'whybrew', 'commented', 'your', 'status', 'ryan', 'wrote', 'turd', 'ferguson', 'butt', 'horn']\n",
      "classification error ['oem', 'adobe', 'microsoft', 'softwares', 'fast', 'order', 'and', 'download', 'microsoft', 'office', 'professional', 'plus', '2007', '2010', '129', 'microsoft', 'windows', 'ultimate', '119', 'adobe', 'photoshop', 'cs5', 'extended', 'adobe', 'acrobat', 'pro', 'extended', 'windows', 'professional', 'thousand', 'more', 'titles']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "classification error ['that', 'cold', 'there', 'going', 'retirement', 'party', 'are', 'the', 'leaves', 'changing', 'color']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['peter', 'with', 'jose', 'out', 'town', 'you', 'want', 'meet', 'once', 'while', 'keep', 'things', 'going', 'and', 'some', 'interesting', 'stuff', 'let', 'know', 'eugene']\n",
      "classification error ['thought', 'about', 'this', 'and', 'think', 'possible', 'should', 'get', 'another', 'lunch', 'have', 'car', 'now', 'and', 'could', 'come', 'pick', 'you', 'this', 'time', 'does', 'this', 'wednesday', 'work', 'can', 'have', 'signed', 'copy', 'you', 'book']\n",
      "classification error ['percocet', '625', 'withoutprescription', 'tabs', '225', 'percocet', 'narcotic', 'analgesic', 'used', 'treat', 'moderate', 'moderately', 'severepain', 'top', 'quality', 'express', 'shipping', '100', 'safe', 'discreet', 'private', 'buy', 'cheap', 'percocet', 'online']\n",
      "classification error ['get', 'off', 'online', 'watchesstore', 'discount', 'watches', 'for', 'all', 'famous', 'brands', 'watches', 'arolexbvlgari', 'dior', 'hermes', 'oris', 'cartier', 'and', 'more', 'brands', 'louis', 'vuitton', 'bags', 'wallets', 'gucci', 'bags', 'tiffany', 'jewerly', 'enjoy', 'full', 'year', 'warranty', 'shipment', 'via', 'reputable', 'courier', 'fedex', 'ups', 'dhl', 'and', 'ems', 'speedpost', 'you', 'will', '100', 'recieve', 'your', 'order']\n",
      "classification error ['zach', 'hamm', 'commented', 'your', 'status', 'zach', 'wrote', 'doggy', 'style', 'enough', 'said', 'thank', 'you', 'good', 'night']\n",
      "classification error ['saw', 'this', 'the', 'way', 'the', 'coast', 'thought', 'might', 'like', 'hangzhou', 'huge', 'one', 'day', 'wasn', 'enough', 'but', 'got', 'glimpse', 'went', 'inside', 'the', 'china', 'pavilion', 'expo', 'pretty', 'interesting', 'each', 'province', 'has', 'exhibit']\n",
      "classification error ['scifinance', 'now', 'automatically', 'generates', 'gpu', 'enabled', 'pricing', 'risk', 'model', 'source', 'code', 'that', 'runs', '300x', 'faster', 'than', 'serial', 'code', 'using', 'new', 'nvidia', 'fermi', 'class', 'tesla', 'series', 'gpu', 'scifinance', 'derivatives', 'pricing', 'and', 'risk', 'model', 'development', 'tool', 'that', 'automatically', 'generates', 'and', 'gpu', 'enabled', 'source', 'code', 'from', 'concise', 'high', 'level', 'model', 'specifications', 'parallel', 'computing', 'cuda', 'programming', 'expertise', 'required', 'scifinance', 'automatic', 'gpu', 'enabled', 'monte', 'carlo', 'pricing', 'model', 'source', 'code', 'generation', 'capabilities', 'have', 'been', 'significantly', 'extended', 'the', 'latest', 'release', 'this', 'includes']\n",
      "the error rate is:  0.3\n",
      "numwords: 694\n",
      "[ 1.  1.  2.  1.  1.  2.  1.  1.  3.  1.  2.  1.  2.  1.  1.  1.  2.  1.\n",
      "  1.  1.  3.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  2.  2.  1.  2.\n",
      "  1.  1.  2.  1.  1.  2.  1.  1.  1.  1.  1.  3.  2.  1.  1.  1.  1.  1.\n",
      "  2.  2.  3.  1.  1.  1.  2.  2.  1.  2.  1.  1.  2.  1.  1.  1.  3.  1.\n",
      "  1.  1.  1.  2.  1.  1.  3.  1.  1.  1. 13.  1.  1.  1.  1.  1.  1.  1.\n",
      "  1.  1.  1.  3.  2.  3.  1.  2. 17.  1.  1.  1.  1.  1.  1.  3.  1.  2.\n",
      "  1.  1.  1.  2.  1.  2.  1.  1.  2.  1.  1.  1.  2.  7.  1.  1.  1.  1.\n",
      "  1.  4.  2.  2.  1.  1.  7.  2.  1.  1.  1.  1.  1.  2.  1.  2.  1.  1.\n",
      "  1.  1.  1.  1.  1.  1.  1.  2.  1.  1.  1.  1.  1.  2.  1.  1.  1.  6.\n",
      "  1.  1.  1.  4.  3.  2.  1.  1.  1. 11.  1.  1.  1.  2.  2.  1.  4.  6.\n",
      "  1.  1.  1.  1.  2.  1.  1.  1.  1.  2.  2.  1.  1.  1.  2.  1.  3.  1.\n",
      "  1.  1.  1.  1.  1.  6.  1.  1.  2.  1.  1.  1.  1.  2.  2.  1.  1.  1.\n",
      "  4.  1.  1.  1.  1.  1.  1.  1.  2.  1.  1.  3.  1.  2.  4.  2.  2.  2.\n",
      "  1.  2.  1.  1.  2.  2.  1.  1.  2.  2.  2.  2.  2.  1.  2.  1.  1.  1.\n",
      "  1.  1.  2.  1.  2.  2.  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  1.  1.\n",
      "  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  3.  1.\n",
      "  2.  1.  1.  1.  1.  1.  2.  1.  1.  1.  4.  1.  1.  2.  2.  1.  2.  2.\n",
      "  1.  1.  1.  2.  1.  1.  1.  1.  1.  2.  1.  1.  1.  2.  1.  1.  1.  1.\n",
      "  1.  1.  1.  2.  2.  3.  3.  1.  2.  2.  1.  1.  1.  1.  1.  2.  1.  1.\n",
      "  1.  2.  5.  1.  2.  1.  1.  1.  1.  1.  3.  1.  1.  1.  1.  1.  1.  2.\n",
      "  1.  1.  1.  1.  1.  1.  1.  1.  2.  1.  1.  1.  1.  1.  1.  1.  1.  1.\n",
      "  2.  6. 15.  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  2.\n",
      "  1.  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  2.  1.  2.  1.  1.  1.  1.\n",
      "  1.  1.  2.  1.  2.  1.  2.  1.  2.  1.  2.  2.  4.  1.  3.  1.  1.  1.\n",
      "  1.  1.  2.  1.  1.  1.  1.  1.  1.  1.  2.  1.  1.  1.  2.  1.  1.  1.\n",
      "  1.  2.  1.  1.  2.  1.  1.  1.  4.  3.  2.  1.  1.  2.  1.  1.  1.  2.\n",
      "  2.  1.  1.  1.  1.  1.  6.  1.  1.  2.  1.  1.  1.  1.  1.  2.  1.  1.\n",
      "  2.  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  1.  2.  1.  1.  2.  3.\n",
      "  1.  1.  1.  1.  1.  1.  2.  1.  1.  1.  5.  2.  1.  5.  1.  1.  2.  2.\n",
      "  1.  2.  2.  1.  2.  1.  1.  1.  2.  1.  4.  1.  1.  1.  1.  1.  2.  2.\n",
      "  1.  1.  1.  1.  1.  1.  4.  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  6.\n",
      "  1.  1.  1.  1.  6.  2.  1.  4.  1.  2.  3.  2.  1.  3.  1.  1.  1.  1.\n",
      "  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  1.  1.  1.  1.  2.  2.  2.\n",
      "  1.  1.  1.  1.  2.  1.  2.  2.  1.  2.  2.  1.  1.  1.  1.  1.  1. 11.\n",
      "  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  1.  2.  2.  1.  2.  2.  1.\n",
      "  1.  1.  1.  1.  1.  1.  2.  1.  2.  2.  1.  1.  1.  1.  2.  1.  1.  2.\n",
      "  1.  1.  1.  1.  2.  1.  1.  2.  1.  1.  3.  1.  1.  1.  1.  1.  1.  2.\n",
      "  2.  1.  1.  1.  1.  1.  1.  1.  2.  2.  1.  1.  1.  1.  2.  1.  1.  1.\n",
      "  2.  3.  1.  1.  1.  1.  1.  1.  1.  2.]\n",
      "classification error ['ordercializviagra', 'online', 'save', '0nline', 'pharmacy', 'noprescription', 'required', 'buy', 'canadian', 'drugs', 'wholesale', 'prices', 'and', 'save', 'fda', 'approved', 'drugs', 'superb', 'quality', 'drugs', 'only', 'accept', 'all', 'major', 'credit', 'cards']\n",
      "classification error ['arvind', 'thirumalai', 'commented', 'your', 'status', 'arvind', 'wrote', 'you', 'know', 'reply', 'this', 'email', 'comment', 'this', 'status']\n",
      "classification error ['yeah', 'ready', 'may', 'not', 'here', 'because', 'jar', 'jar', 'has', 'plane', 'tickets', 'germany', 'for']\n",
      "classification error ['what', 'going', 'there', 'talked', 'john', 'email', 'talked', 'about', 'some', 'computer', 'stuff', 'that', 'went', 'bike', 'riding', 'the', 'rain', 'was', 'not', 'that', 'cold', 'went', 'the', 'museum', 'yesterday', 'was', 'get', 'and', 'they', 'had', 'free', 'food', 'the', 'same', 'time', 'was', 'giants', 'game', 'when', 'got', 'done', 'had', 'take', 'the', 'train', 'with', 'all', 'the', 'giants', 'fans', 'they', 'are', 'drunk']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['experience', 'with', 'biggerpenis', 'today', 'grow', 'inches', 'more', 'the', 'safest', 'most', 'effective', 'methods', 'of_penisen1argement', 'save', 'your', 'time', 'and', 'money', 'bettererections', 'with', 'effective', 'ma1eenhancement', 'products', 'ma1eenhancement', 'supplement', 'trusted', 'millions', 'buy', 'today']\n",
      "classification error ['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']\n",
      "classification error ['thanks', 'peter', 'definitely', 'check', 'this', 'how', 'your', 'book', 'going', 'heard', 'chapter', 'came', 'and', 'was', 'good', 'shape', 'hope', 'you', 'are', 'doing', 'well', 'cheers', 'troy']\n",
      "classification error ['codeine', 'the', 'most', 'competitive', 'price', 'net', 'codeine', 'wilson', '30mg', '156', 'codeine', 'wilson', '30mg', '291', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '396', 'freeviagra', 'pills', 'codeine', 'wilson', '30mg', '120', '492', 'freeviagra', 'pills']\n",
      "classification error ['this', 'mail', 'was', 'sent', 'from', 'notification', 'only', 'address', 'that', 'cannot', 'accept', 'incoming', 'mail', 'please', 'not', 'reply', 'this', 'message', 'thank', 'you', 'for', 'your', 'online', 'reservation', 'the', 'store', 'you', 'selected', 'has', 'located', 'the', 'item', 'you', 'requested', 'and', 'has', 'placed', 'hold', 'your', 'name', 'please', 'note', 'that', 'all', 'items', 'are', 'held', 'for', 'day', 'please', 'note', 'store', 'prices', 'may', 'differ', 'from', 'those', 'online', 'you', 'have', 'questions', 'need', 'assistance', 'with', 'your', 'reservation', 'please', 'contact', 'the', 'store', 'the', 'phone', 'number', 'listed', 'below', 'you', 'can', 'also', 'access', 'store', 'information', 'such', 'store', 'hours', 'and', 'location', 'the', 'web', 'http', 'www', 'borders', 'com', 'online', 'store', 'storedetailview_98']\n",
      "classification error ['buy', 'ambiem', 'zolpidem', '5mg', '10mg', 'pill', 'pills', '129', 'pills', '199', '180', 'pills', '430', 'pills', '138', '120', 'pills', '322']\n",
      "classification error ['codeine', '15mg', 'for', '203', 'visa', 'only', 'codeine', 'methylmorphine', 'narcotic', 'opioid', 'pain', 'reliever', 'have', '15mg', '30mg', 'pills', '15mg', 'for', '203', '15mg', 'for', '385', '15mg', 'for', '562', 'visa', 'only']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "classification error ['peter', 'these', 'are', 'the', 'only', 'good', 'scenic', 'ones', 'and', 'too', 'bad', 'there', 'was', 'girl', 'back', 'one', 'them', 'just', 'try', 'enjoy', 'the', 'blue', 'sky']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['ryan', 'whybrew', 'commented', 'your', 'status', 'ryan', 'wrote', 'turd', 'ferguson', 'butt', 'horn']\n",
      "classification error ['oem', 'adobe', 'microsoft', 'softwares', 'fast', 'order', 'and', 'download', 'microsoft', 'office', 'professional', 'plus', '2007', '2010', '129', 'microsoft', 'windows', 'ultimate', '119', 'adobe', 'photoshop', 'cs5', 'extended', 'adobe', 'acrobat', 'pro', 'extended', 'windows', 'professional', 'thousand', 'more', 'titles']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe', 'the', 'proven', 'naturalpenisenhancement', 'that', 'works', '100', 'moneyback', 'guaranteeed']\n",
      "classification error ['that', 'cold', 'there', 'going', 'retirement', 'party', 'are', 'the', 'leaves', 'changing', 'color']\n",
      "classification error ['bargains', 'here', 'buy', 'phentermin', 'buy', 'genuine', 'phentermin', 'low', 'cost', 'visa', 'accepted', '130', '219', '292', '120', '366', '180', '513']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['peter', 'with', 'jose', 'out', 'town', 'you', 'want', 'meet', 'once', 'while', 'keep', 'things', 'going', 'and', 'some', 'interesting', 'stuff', 'let', 'know', 'eugene']\n",
      "classification error ['thought', 'about', 'this', 'and', 'think', 'possible', 'should', 'get', 'another', 'lunch', 'have', 'car', 'now', 'and', 'could', 'come', 'pick', 'you', 'this', 'time', 'does', 'this', 'wednesday', 'work', 'can', 'have', 'signed', 'copy', 'you', 'book']\n",
      "classification error ['percocet', '625', 'withoutprescription', 'tabs', '225', 'percocet', 'narcotic', 'analgesic', 'used', 'treat', 'moderate', 'moderately', 'severepain', 'top', 'quality', 'express', 'shipping', '100', 'safe', 'discreet', 'private', 'buy', 'cheap', 'percocet', 'online']\n",
      "classification error ['get', 'off', 'online', 'watchesstore', 'discount', 'watches', 'for', 'all', 'famous', 'brands', 'watches', 'arolexbvlgari', 'dior', 'hermes', 'oris', 'cartier', 'and', 'more', 'brands', 'louis', 'vuitton', 'bags', 'wallets', 'gucci', 'bags', 'tiffany', 'jewerly', 'enjoy', 'full', 'year', 'warranty', 'shipment', 'via', 'reputable', 'courier', 'fedex', 'ups', 'dhl', 'and', 'ems', 'speedpost', 'you', 'will', '100', 'recieve', 'your', 'order']\n",
      "classification error ['zach', 'hamm', 'commented', 'your', 'status', 'zach', 'wrote', 'doggy', 'style', 'enough', 'said', 'thank', 'you', 'good', 'night']\n",
      "classification error ['saw', 'this', 'the', 'way', 'the', 'coast', 'thought', 'might', 'like', 'hangzhou', 'huge', 'one', 'day', 'wasn', 'enough', 'but', 'got', 'glimpse', 'went', 'inside', 'the', 'china', 'pavilion', 'expo', 'pretty', 'interesting', 'each', 'province', 'has', 'exhibit']\n",
      "classification error ['scifinance', 'now', 'automatically', 'generates', 'gpu', 'enabled', 'pricing', 'risk', 'model', 'source', 'code', 'that', 'runs', '300x', 'faster', 'than', 'serial', 'code', 'using', 'new', 'nvidia', 'fermi', 'class', 'tesla', 'series', 'gpu', 'scifinance', 'derivatives', 'pricing', 'and', 'risk', 'model', 'development', 'tool', 'that', 'automatically', 'generates', 'and', 'gpu', 'enabled', 'source', 'code', 'from', 'concise', 'high', 'level', 'model', 'specifications', 'parallel', 'computing', 'cuda', 'programming', 'expertise', 'required', 'scifinance', 'automatic', 'gpu', 'enabled', 'monte', 'carlo', 'pricing', 'model', 'source', 'code', 'generation', 'capabilities', 'have', 'been', 'significantly', 'extended', 'the', 'latest', 'release', 'this', 'includes']\n",
      "classification error ['hommies', 'just', 'got', 'phone', 'call', 'from', 'the', 'roofer', 'they', 'will', 'come', 'and', 'spaying', 'the', 'foaming', 'today', 'will', 'dusty', 'pls', 'close', 'all', 'the', 'doors', 'and', 'windows', 'could', 'you', 'help', 'close', 'bathroom', 'window', 'cat', 'window', 'and', 'the', 'sliding', 'door', 'behind', 'the', 'don', 'know', 'how', 'can', 'those', 'cats', 'survive', 'sorry', 'for', 'any', 'inconvenience']\n",
      "classification error ['hydrocodone', 'vicodin', 'brand', 'watson', 'vicodin', '750', '195', '120', '570', 'brand', 'watson', '750', '195', '120', '570', 'brand', 'watson', '325', '199', '120', '588', 'noprescription', 'required', 'free', 'express', 'fedex', 'days', 'delivery', 'for', 'over', '200', 'order', 'major', 'credit', 'cards', 'check']\n",
      "classification error ['linkedin', 'kerry', 'haloney', 'requested', 'add', 'you', 'connection', 'linkedin', 'peter', 'like', 'add', 'you', 'professional', 'network', 'linkedin', 'kerry', 'haloney']\n",
      "classification error ['you', 'have', 'everything', 'gain', 'incredib1e', 'gains', 'length', 'inches', 'yourpenis', 'permanantly', 'amazing', 'increase', 'thickness', 'yourpenis', 'betterejacu1ation', 'control', 'experience', 'rock', 'harderecetions', 'explosive', 'intenseorgasns', 'increase', 'volume', 'ofejacu1ate', 'doctor', 'designed', 'and', 'endorsed', '100', 'herbal', '100', 'natural', '100', 'safe']\n",
      "classification error ['peter', 'sure', 'thing', 'sounds', 'good', 'let', 'know', 'what', 'time', 'would', 'good', 'for', 'you', 'will', 'come', 'prepared', 'with', 'some', 'ideas', 'and', 'can', 'from', 'there', 'regards', 'vivek']\n",
      "classification error ['ordercializviagra', 'online', 'save', '0nline', 'pharmacy', 'noprescription', 'required', 'buy', 'canadian', 'drugs', 'wholesale', 'prices', 'and', 'save', 'fda', 'approved', 'drugs', 'superb', 'quality', 'drugs', 'only', 'accept', 'all', 'major', 'credit', 'cards', 'order', 'today', 'from']\n",
      "classification error ['percocet', '625', 'withoutprescription', 'tabs', '225', 'percocet', 'narcotic', 'analgesic', 'used', 'treat', 'moderate', 'moderately', 'severepain', 'top', 'quality', 'express', 'shipping', '100', 'safe', 'discreet', 'private', 'buy', 'cheap', 'percocet', 'online']\n",
      "classification error ['get', 'off', 'online', 'watchesstore', 'discount', 'watches', 'for', 'all', 'famous', 'brands', 'watches', 'arolexbvlgari', 'dior', 'hermes', 'oris', 'cartier', 'and', 'more', 'brands', 'louis', 'vuitton', 'bags', 'wallets', 'gucci', 'bags', 'tiffany', 'jewerly', 'enjoy', 'full', 'year', 'warranty', 'shipment', 'via', 'reputable', 'courier', 'fedex', 'ups', 'dhl', 'and', 'ems', 'speedpost', 'you', 'will', '100', 'recieve', 'your', 'order']\n",
      "classification error ['jay', 'stepp', 'commented', 'your', 'status', 'jay', 'wrote', 'the', 'reply', 'this', 'email', 'comment', 'this', 'status', 'see', 'the', 'comment', 'thread', 'follow', 'the', 'link', 'below']\n",
      "classification error ['peter', 'the', 'hotels', 'are', 'the', 'ones', 'that', 'rent', 'out', 'the', 'tent', 'they', 'are', 'all', 'lined', 'the', 'hotel', 'grounds', 'much', 'for', 'being', 'one', 'with', 'nature', 'more', 'like', 'being', 'one', 'with', 'couple', 'dozen', 'tour', 'groups', 'and', 'nature', 'have', 'about', '100m', 'pictures', 'from', 'that', 'trip', 'can', 'through', 'them', 'and', 'get', 'you', 'jpgs', 'favorite', 'scenic', 'pictures', 'where', 'are', 'you', 'and', 'jocelyn', 'now', 'new', 'york', 'will', 'you', 'come', 'tokyo', 'for', 'chinese', 'new', 'year', 'perhaps', 'see', 'the', 'two', 'you', 'then', 'will', 'thailand', 'for', 'winter', 'holiday', 'see', 'mom', 'take', 'care']\n",
      "the error rate is:  0.45\n",
      "zly: 0.2625\n"
     ]
    }
   ],
   "source": [
    "# Spam email detection: naive Bayes with repeated hold-out cross validation.\n",
    "def spam_text():\n",
    "    # Tokenised documents, one list of words per email,\n",
    "    # e.g. ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'].\n",
    "    doc_list = []\n",
    "    # Label for each document: 1 = spam, 0 = ham.\n",
    "    class_list = []\n",
    "    # Flat collection of all parsed emails (kept in step with doc_list).\n",
    "    full_text = []\n",
    "    # Load and parse the 25 spam and 25 ham emails.\n",
    "    for i in range(1, 26):\n",
    "        word_list = text_parse(open('email/spam/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(1)\n",
    "\n",
    "        word_list = text_parse(open('email/ham/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(0)\n",
    "    # Vocabulary: every unique word seen across the corpus.\n",
    "    vocab_list = create_vocab_list(doc_list)\n",
    "    # Accumulated per-round error rate, averaged at the end.\n",
    "    zly_errors = 0.0\n",
    "    # Number of hold-out cross-validation rounds.\n",
    "    num = 4\n",
    "    for z in range(num):\n",
    "        # BUGFIX: rebuild the train/test split from scratch each round.\n",
    "        # Previously both lists were created once before the loop, so\n",
    "        # test_set grew by 10 every round (10, 20, 30, 40) while\n",
    "        # train_set shrank, and earlier rounds' test docs were re-tested.\n",
    "        train_set = list(range(50))\n",
    "        test_set = []\n",
    "        # Randomly move 10 document indices from train_set to test_set.\n",
    "        for i in range(10):\n",
    "            # numpy randint(0, 50) draws from [0, 50); redraw until the\n",
    "            # index is still available in train_set.\n",
    "            rand_index = int(random.randint(0, 50))\n",
    "            while rand_index not in train_set:\n",
    "                rand_index = int(random.randint(0, 50))\n",
    "            test_set.append(rand_index)\n",
    "            train_set.remove(rand_index)\n",
    "        train_matrix = []\n",
    "        train_class = []\n",
    "        # Build bag-of-words vectors and labels for the training documents.\n",
    "        for doc_index in train_set:\n",
    "            train_matrix.append(bag_of_word_vec(vocab_list, doc_list[doc_index]))\n",
    "            train_class.append(class_list[doc_index])\n",
    "        # Train naive Bayes: per-class word probability vectors and spam prior.\n",
    "        p0_vec, p1_vec, p_spam = train(array(train_matrix), array(train_class))\n",
    "        error_count = 0\n",
    "        # Evaluate the model on the held-out test documents.\n",
    "        for doc_index in test_set:\n",
    "            word_vector = bag_of_word_vec(vocab_list, doc_list[doc_index])\n",
    "            if classify(array(word_vector), p0_vec, p1_vec, p_spam) != class_list[doc_index]:\n",
    "                error_count += 1\n",
    "                # BUGFIX: report only misclassified documents; previously this\n",
    "                # print was outside the if and fired for every test document.\n",
    "                print(\"classification error\", doc_list[doc_index])\n",
    "        f01 = float(error_count) / len(test_set)\n",
    "        print('the error rate is: ', f01)\n",
    "        zly_errors += f01\n",
    "    print('zly:', zly_errors / num)\n",
    "spam_text()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 1, 2, 3, 4]\n",
      "0\n",
      "4\n",
      "2\n",
      "[1, 3]\n",
      "[0, 4, 2]\n"
     ]
    }
   ],
   "source": [
    "pool = list(range(5))  # candidate values [0, 1, 2, 3, 4]\n",
    "print(pool)\n",
    "picked = []  # values sampled so far\n",
    "# Draw three distinct values from pool without replacement.\n",
    "for _ in range(3):\n",
    "    idx = int(random.randint(0, 5))\n",
    "    print(idx)\n",
    "    # Redraw (silently) until the drawn value is still available.\n",
    "    while idx not in pool:\n",
    "        idx = int(random.randint(0, 5))\n",
    "    picked.append(idx)\n",
    "    pool.remove(idx)\n",
    "print(pool)\n",
    "print(picked)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
