{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Created on 2018-05-20\n",
    "\n",
    "@author: Shiyipaisizuo\n",
    "\"\"\"\n",
    "\n",
    "from numpy import *\n",
    "import re\n",
    "import operator\n",
    "\n",
    "\n",
    "# Load the toy training data.\n",
    "\n",
    "def load_data_set():\n",
    "    # Six tokenized example posts used as the training corpus.\n",
    "    posting_list = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n",
    "                    ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n",
    "                    ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n",
    "                    ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n",
    "                    ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n",
    "                    ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]\n",
    "\n",
    "    # Label for each post above: 1 = abusive/insulting text, 0 = normal speech.\n",
    "    class_vec = [0, 1, 0, 1, 0, 1]\n",
    "\n",
    "    return posting_list, class_vec\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the vocabulary: a de-duplicated list of every word that appears\n",
    "# in any document of the data set.\n",
    "def create_vocab_list(data_set):\n",
    "\n",
    "    # Start from an empty set.\n",
    "    vocab_set = set([])\n",
    "\n",
    "    for document in data_set:\n",
    "        vocab_set = vocab_set | set(document)\n",
    "    # Union of the running vocabulary with each document's words.\n",
    "    return list(vocab_set)\n",
    "\n",
    "\n",
    "# Convert one document into a word vector over the vocabulary.\n",
    "# vocab_list: list of all known words; input_set: a tokenized sentence,\n",
    "# e.g. ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'].\n",
    "def set_of_word_vec(vocab_list, input_set):\n",
    "\n",
    "    # Start with a vector of zeros, one slot per vocabulary word.\n",
    "    vec = [0] * len(vocab_list)\n",
    "    # NOTE(review): despite the \"set of words\" name this uses += 1, so it\n",
    "    # counts occurrences (bag-of-words) rather than recording 0/1 presence.\n",
    "    for word in input_set:\n",
    "        if word in vocab_list:\n",
    "            vec[vocab_list.index(word)] += 1\n",
    "    # Return the occurrence-count vector.\n",
    "    return vec\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['maybe',\n",
       " 'problems',\n",
       " 'take',\n",
       " 'park',\n",
       " 'ate',\n",
       " 'posting',\n",
       " 'stop',\n",
       " 'garbage',\n",
       " 'I',\n",
       " 'food',\n",
       " 'buying',\n",
       " 'to',\n",
       " 'dog',\n",
       " 'help',\n",
       " 'is',\n",
       " 'dalmation',\n",
       " 'him',\n",
       " 'worthless',\n",
       " 'has',\n",
       " 'not',\n",
       " 'love',\n",
       " 'my',\n",
       " 'cute',\n",
       " 'mr',\n",
       " 'stupid',\n",
       " 'so',\n",
       " 'flea',\n",
       " 'quit',\n",
       " 'how',\n",
       " 'please',\n",
       " 'licks',\n",
       " 'steak']"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build the vocabulary from the toy posts and display it.\n",
    "listoposts,listclasses=load_data_set()\n",
    "myvocablist=create_vocab_list(listoposts)\n",
    "myvocablist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the naive Bayes classifier.\n",
    "# train_matrix: training documents as word-count vectors; in our test this\n",
    "#   is a 6x32 matrix (one row per document, one column per vocabulary word).\n",
    "# train_category: 1x6 label vector for the rows, e.g. [0, 1, 0, 1, 0, 1].\n",
    "# Returns (p0_vec, p1_vec, p_abusive): the per-word log-probability vectors\n",
    "# for class 0 and class 1, and the prior probability of class 1.\n",
    "def train(train_matrix, train_category):\n",
    "    # Number of training documents.\n",
    "    num_train_docs = len(train_matrix)\n",
    "    # Vocabulary size: every row has the same length, so read it from the\n",
    "    # first row (the original looped over all rows, overwriting num_words).\n",
    "    num_words = len(train_matrix[0]) if num_train_docs > 0 else 0\n",
    "    # p_abusive: fraction of training documents labelled abusive (class 1).\n",
    "    p_abusive = sum(train_category) / float(num_train_docs)\n",
    "\n",
    "    # Initialise counts to 1 and denominators to 2 (Laplace smoothing), so a\n",
    "    # word unseen in one class cannot force the whole product to zero.\n",
    "    p0_num = ones(num_words)\n",
    "    p1_num = ones(num_words)\n",
    "    p0_denom = 2.0\n",
    "    p1_denom = 2.0\n",
    "    # For each training document, add its count vector to the matching\n",
    "    # class's word counts and its total word count to that class's denominator.\n",
    "    for i in range(num_train_docs):\n",
    "        if train_category[i] == 1:\n",
    "            p1_num += train_matrix[i]\n",
    "            p1_denom += sum(train_matrix[i])\n",
    "        else:\n",
    "            p0_num += train_matrix[i]\n",
    "            p0_denom += sum(train_matrix[i])\n",
    "\n",
    "    # Element-wise division gives per-word conditional probabilities; take\n",
    "    # the log so later products of many small probabilities become sums and\n",
    "    # do not underflow.\n",
    "    p1_vec = log(p1_num / p1_denom)\n",
    "    p0_vec = log(p0_num / p0_denom)\n",
    "    return p0_vec, p1_vec, p_abusive"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]]\n"
     ]
    }
   ],
   "source": [
    "# Convert every toy post into a count vector over the vocabulary.\n",
    "trainMat=[]\n",
    "for postindoc in listoposts:\n",
    "    trainMat.append(set_of_word_vec(myvocablist,postindoc))\n",
    "print(trainMat)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-3.25809654 -2.56494936 -3.25809654 -3.25809654 -2.56494936 -3.25809654\n",
      " -2.56494936 -3.25809654 -2.56494936 -3.25809654 -3.25809654 -2.56494936\n",
      " -2.56494936 -2.56494936 -2.56494936 -2.56494936 -2.15948425 -3.25809654\n",
      " -2.56494936 -3.25809654 -2.56494936 -1.87180218 -2.56494936 -2.56494936\n",
      " -3.25809654 -2.56494936 -2.56494936 -3.25809654 -2.56494936 -2.56494936\n",
      " -2.56494936 -2.56494936] [-2.35137526 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -2.35137526\n",
      " -2.35137526 -2.35137526 -3.04452244 -2.35137526 -2.35137526 -2.35137526\n",
      " -1.94591015 -3.04452244 -3.04452244 -3.04452244 -2.35137526 -1.94591015\n",
      " -3.04452244 -2.35137526 -3.04452244 -3.04452244 -3.04452244 -3.04452244\n",
      " -1.65822808 -3.04452244 -3.04452244 -2.35137526 -3.04452244 -3.04452244\n",
      " -3.04452244 -3.04452244] 0.5\n"
     ]
    }
   ],
   "source": [
    "# Train on the toy matrix and show the log-probability vectors and prior.\n",
    "p0v,p1v,pAb=train(trainMat,listclasses)\n",
    "print(p0v,p1v,pAb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Naive Bayes classification.\n",
    "# vec_classify: word vector for the input sentence, e.g. [1 0 1 0 0 ...].\n",
    "# p0_vec, p1_vec, p_class: the three values returned by train().\n",
    "def classify(vec_classify, p0_vec, p1_vec, p_class):\n",
    "    # Log-posterior (up to a shared constant) for each class: sum of the\n",
    "    # log word probabilities weighted by the input vector, plus the log prior.\n",
    "    p1 = sum(vec_classify * p1_vec) + log(p_class)\n",
    "    p0 = sum(vec_classify * p0_vec) + log(1.0 - p_class)\n",
    "    # Predict whichever class has the larger log-posterior.\n",
    "    if p1 > p0:\n",
    "        return 1\n",
    "    else:\n",
    "        return 0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Naive Bayes bag-of-words vector, same behavior as set_of_word_vec:\n",
    "# counts how often each vocabulary word occurs in the input sentence.\n",
    "# NOTE(review): parameter name 'inpiut_set' is a typo for 'input_set';\n",
    "# kept as-is so any keyword-argument callers are not broken.\n",
    "def bag_of_word_vec(vocab_list, inpiut_set):\n",
    "    vec = [0] * len(vocab_list)\n",
    "    for word in inpiut_set:\n",
    "        if word in vocab_list:\n",
    "            vec[vocab_list.index(word)] += 1\n",
    "\n",
    "    return vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# End-to-end check of the naive Bayes pipeline on the toy data.\n",
    "def test():\n",
    "    list_of_posts, list_classes = load_data_set()\n",
    "    my_vocal_list = create_vocab_list(list_of_posts)\n",
    "    # Build the 6x32 training matrix (one count vector per post).\n",
    "    train_matrix = []\n",
    "    for post_in_doc in list_of_posts:\n",
    "        train_matrix.append(set_of_word_vec(my_vocal_list, post_in_doc))\n",
    "    # Train the model on the toy corpus.\n",
    "    p0_vec, p1_vec, p_abusive = train(array(train_matrix), array(list_classes))\n",
    "    # Classify one clearly normal and one clearly abusive sentence.\n",
    "    test_entry = ['love', 'my', 'dalmation']\n",
    "    this_doc = array(set_of_word_vec(my_vocal_list, test_entry))\n",
    "    print(test_entry, 'classified as:', classify(this_doc,p0_vec, p1_vec, p_abusive))\n",
    "\n",
    "    test_entry = ['stupid', 'garbage']\n",
    "    this_doc = array(set_of_word_vec(my_vocal_list, test_entry))\n",
    "    print(test_entry, 'classified as: ', classify(this_doc, p0_vec, p1_vec, p_abusive))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['love', 'my', 'dalmation'] classified as: 0\n",
      "['stupid', 'garbage'] classified as:  1\n"
     ]
    }
   ],
   "source": [
    "# Run the toy-data classification check.\n",
    "test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['this',\n",
       " 'book',\n",
       " 'the',\n",
       " 'best',\n",
       " 'book',\n",
       " 'python',\n",
       " 'have',\n",
       " 'ever',\n",
       " 'laid',\n",
       " 'eyes',\n",
       " 'upon']"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Split a raw text string into lowercase tokens.\n",
    "def text_parse(big_string):\n",
    "    # Split on runs of non-word characters. Use a raw string so \\W is a\n",
    "    # regex escape, not an invalid Python string escape (the original\n",
    "    # '\\W+' raises a DeprecationWarning and is a SyntaxWarning/SyntaxError\n",
    "    # in newer Python versions).\n",
    "    list_of_tokens = re.split(r'\\W+', big_string)\n",
    "\n",
    "    # Keep only tokens longer than two characters, lower-cased.\n",
    "    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]\n",
    "mySent='This book is the best book on Python or M.L. I !!!!have ever@@@ laid eyes upon.'\n",
    "text_parse(mySent)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Practical section follows; the functions above are the important part!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Spam e-mail detection with repeated random hold-out validation.\n",
    "def spam_text():\n",
    "    # Tokenized documents, one list of words per e-mail.\n",
    "    doc_list = []\n",
    "    # Label for each document: 1 = spam, 0 = ham.\n",
    "    class_list = []\n",
    "    # Flat collection of all word lists (kept for parity with doc_list).\n",
    "    full_text = []\n",
    "    # Load and parse the 25 spam and 25 ham e-mails.\n",
    "    for i in range(1, 26):\n",
    "        word_list = text_parse(open('email/spam/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(1)\n",
    "        \n",
    "        word_list = text_parse(open('email/ham/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(0)\n",
    "    # Vocabulary over all 50 documents.\n",
    "    vocab_list = create_vocab_list(doc_list)\n",
    "    # Accumulated error rate over all validation rounds.\n",
    "    zly_errors = 0.0\n",
    "    # Number of hold-out rounds.\n",
    "    num = 4\n",
    "    for z in range(num):\n",
    "        # Start each round from the full index set [0..49] and an empty\n",
    "        # test set. (Previously both lived outside this loop, so every\n",
    "        # round shrank the training set by 10 and grew the test set by 10,\n",
    "        # corrupting the validation: 40/10, then 30/20, then 20/30...)\n",
    "        train_set = list(range(50))\n",
    "        test_set = []\n",
    "        # Randomly move 10 distinct document indices into the test set.\n",
    "        for i in range(10):\n",
    "            # numpy randint(0, 50) yields 0..49 inclusive of 0, exclusive of 50.\n",
    "            rand_index = int(random.randint(0, 50))\n",
    "            while rand_index not in train_set:\n",
    "                rand_index = int(random.randint(0, 50))\n",
    "            test_set.append(rand_index)\n",
    "            train_set.remove(rand_index)\n",
    "        train_matrix = []\n",
    "        train_class = []\n",
    "\n",
    "        # Build the training matrix and labels from the remaining 40 documents.\n",
    "        for doc_index in train_set:\n",
    "            train_matrix.append(bag_of_word_vec(vocab_list, doc_list[doc_index]))\n",
    "            train_class.append(class_list[doc_index])\n",
    "        # Train: two per-word log-probability vectors plus the spam prior.\n",
    "        p0_vec, p1_vec, p_spam = train(array(train_matrix), array(train_class))\n",
    "        error_count = 0\n",
    "        # Score the held-out documents.\n",
    "        for doc_index in test_set:\n",
    "            word_vector = bag_of_word_vec(vocab_list, doc_list[doc_index])\n",
    "            if classify(array(word_vector), p0_vec, p1_vec, p_spam) != class_list[doc_index]:\n",
    "                error_count += 1\n",
    "                # Only report actual misclassifications (the original print\n",
    "                # sat outside this if, labelling every test doc an error).\n",
    "                print(\"classification error\", doc_list[doc_index])\n",
    "        f01 = float(error_count) / len(test_set)\n",
    "        print('the error rate is: ', f01)\n",
    "        zly_errors += f01\n",
    "    print('zly:', zly_errors / num)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 1, 2, 3, 4]\n",
      "0\n",
      "0\n",
      "4\n",
      "[1, 3]\n",
      "[0, 4, 2]\n"
     ]
    }
   ],
   "source": [
    "a=list(range(5))  # build the list [0, 1, 2, 3, 4]\n",
    "print(a)\n",
    "z0=[]  # start with an empty result list\n",
    "# Move three distinct values from a into z0.\n",
    "for i in range(3):\n",
    "    r= int(random.randint(0,5))\n",
    "    print(r)\n",
    "    while r not in a:\n",
    "        r= int(random.randint(0,5))\n",
    "    z0.append(r)\n",
    "    a.remove(r)\n",
    "print(a)\n",
    "print(z0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch re-implementation of the hold-out split for the spam data.\n",
    "def test_zly():\n",
    "    # Tokenized documents, one list of words per e-mail.\n",
    "    doc_list = []\n",
    "    # Label for each document: 1 = spam, 0 = ham.\n",
    "    class_list = []\n",
    "    # Flat collection of all word lists.\n",
    "    full_text = []\n",
    "    # Load and parse the 25 spam and 25 ham e-mails.\n",
    "    for i in range(1, 26):\n",
    "        word_list = text_parse(open('email/spam/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(1)\n",
    "        \n",
    "        word_list = text_parse(open('email/ham/%d.txt' % i).read())\n",
    "        doc_list.append(word_list)\n",
    "        full_text.append(word_list)\n",
    "        class_list.append(0)\n",
    "    \n",
    "    for i in range(1):\n",
    "        train_materi = []\n",
    "        # Document indices are 0-based: 0..49. (The original used\n",
    "        # range(1, 51), skipping document 0 and overrunning at index 50.)\n",
    "        train_index = list(range(50))\n",
    "        train_label = []\n",
    "        test_index = []\n",
    "        # Move 10 random distinct indices into the test set. The original\n",
    "        # drew randint(1, 6) (values 1..5 only), so at most 5 elements could\n",
    "        # ever be removed and len(train_index) never reached 40: an infinite\n",
    "        # loop.\n",
    "        while len(train_index) != 40:\n",
    "            t = random.randint(0, 50)\n",
    "            if t in train_index:\n",
    "                train_index.remove(t)\n",
    "                test_index.append(t)\n",
    "        my_vocal_list = create_vocab_list(doc_list)\n",
    "        for j in train_index:\n",
    "            # Vectorize the j-th document. (The original passed\n",
    "            # train_index[j] — an index value, and itself mis-indexed —\n",
    "            # instead of the document doc_list[j].)\n",
    "            train_materi.append(set_of_word_vec(my_vocal_list, doc_list[j]))\n",
    "            train_label.append(class_list[j])\n",
    "        p0_vec, p1_vec, p_spam = train(array(train_materi), array(train_label))\n",
    "        print(test_index)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[1, 5]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch check: repeatedly draw randint(1, 6) (values 1..5) and remove\n",
    "# matches from [1, 2, 3, 4, 5] until only two values remain.\n",
    "test=list(range(1,6))\n",
    "print(len(test))\n",
    "while len(test)!=2:\n",
    "    t=random.randint(1,6)\n",
    "    if t in test:\n",
    "        test.remove(t)\n",
    "test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the scratch hold-out split on the spam data.\n",
    "test_zly()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
