{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "docs = {\n",
    "    'data': [\n",
    "    # label 0 (not abusive)\n",
    "    'my dog has flea problems help please',\n",
    "    # label 1 (abusive)\n",
    "    'maybe not take him to dog park stupid',\n",
    "    # label 0\n",
    "    'my dalmation is so cute I love him',\n",
    "     # label 1\n",
    "     'stop posting stupid worthless garbage',\n",
    "     # label 0\n",
    "    'mr licks ate my stark how to stop him',\n",
    "    # label 1\n",
    "    'quit buying worthless dog food stupid'\n",
    " ]\n",
    " # the # 0 / # 1 markers above record whether each sentence is abusive speech\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# class label per document, same order as docs['data'] (matches the # 0 / # 1 markers above)\n",
    "labels = [0,1,0,1,0,1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenize: rebind docs from the raw dict to a list of word-token lists\n",
    "docs = [sentence.split(' ') for sentence in docs['data']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def docSetList(docs):\n",
    "    docSet = set([])\n",
    "    for doc in docs:\n",
    "        docSet = docSet | set(doc)\n",
    "\n",
    "\n",
    "    #排序\n",
    "    docList = list(docSet)\n",
    "    docList.sort()\n",
    "    return docList"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sorted list of all unique words in the corpus; each word is one vector dimension\n",
    "docList = docSetList(docs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def doc2vector(doc, docList):\n",
    "    #1.向量初始化\n",
    "    vector = [0] * len(docList)\n",
    "    for word in doc:\n",
    "        #index:计算word在基中的索引\n",
    "        i = docList.index(word)\n",
    "        vector[i] +=1\n",
    "    return vector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n",
       " ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],\n",
       " ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n",
       " ['stop', 'posting', 'stupid', 'worthless', 'garbage'],\n",
       " ['mr', 'licks', 'ate', 'my', 'stark', 'how', 'to', 'stop', 'him'],\n",
       " ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One count vector per document over the shared vocabulary\n",
    "docsVector = [doc2vector(doc, docList) for doc in docs]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 条件概率\n",
    "```\n",
    "P(AB) = P(A|B)P(B) = P(B|A)P(A)\n",
    "P(A|B) = P(B|A)*P(A)/P(B)\n",
    "```\n",
    "\n",
    "语句(特征的向量)、标签\n",
    "\n",
    "```py\n",
    "def classify(doc, docList):\n",
    "    return label\n",
    "```\n",
    "\n",
    "### classify\n",
    "\n",
    "P(标签|语句)=> P(标签0|语句)  vs P(标签1|语句)\n",
    "\n",
    "=> P(A0|B)  vs  P(A1|B)\n",
    "\n",
    "=> [P(语句|A0) * P(A0)] / P(语句)  vs [P(语句|A1) * P(A1)] / P(语句)\n",
    "\n",
    "=> P(语句|A0) * P(A0) vs P(语句|A1) * P(A1)\n",
    "\n",
    "=> P(单词0... 单词n|A0) * P(A0)  vs P(单词0... 单词n|A1) * P(A1)\n",
    "\n",
    "```\n",
    "P(标签|语句) = 概率 = P(语句|标签)*P(标签)/P(语句)\n",
    "```\n",
    "### 训练的目的：求出P(单词0... 单词n|An)\n",
    "\n",
    "```\n",
    "P(单词0...单词n|An) = P(单词0|An) * P(单词1|An) *...\n",
    "\n",
    "=>\n",
    "\n",
    "A0: P(单词0|A0) * P(单词1|A0) * ... * P(单词n|A0) => 转换成 连加，引入 log运算\n",
    "\n",
    "LOG (P(单词0|A0) * P(单词1|A0) * ... * P(单词n|A0))\n",
    "\n",
    "LOG(P(单词0|A0)) + LOG(P(单词1|A0)) + .... + LOG(P(单词n|A0))\n",
    "\n",
    "A1:  P(单词0|A1) * P(单词1|A1) * ... * P(单词n|A1)\n",
    "\n",
    "```\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "32"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(docList)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.0"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.value_counts('labels',normalize=True)[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 1,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 0,\n",
       " 1,\n",
       " 1,\n",
       " 1,\n",
       " 0]"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "docsVector[0] + docsVector[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def classify(docV, p0V,p1V,p0):\n",
    "    c0 = np.sum(docV*p0V)*np.log(p0)\n",
    "    c1 = np.sum(docV*p0V)*np.log(1-p0)\n",
    "    if c0>c1:\n",
    "        return 0\n",
    "    return 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of training documents — was len('labels'), the six-character\n",
    "# STRING, which only coincidentally equals len(labels)\n",
    "doc_n = len(labels)\n",
    "doc_n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def trainBayes(docsVector,labels):\n",
    "    #文档数量\n",
    "    doc_n = len(labels)\n",
    "    #文档标签概率\n",
    "    p0 = pd.value_counts(labels,normalize=True)[0]\n",
    "    #向量空间维度\n",
    "    dims = len(docsVector[0])\n",
    "\n",
    "    p = {\n",
    "        0 : {},\n",
    "        1 : {}\n",
    "\n",
    "    }\n",
    "\n",
    "    #1. 初始化 分子，分母\n",
    "    p[0]['num'],p[1]['num'] = np.ones(dims),np.ones(dims)\n",
    "    p[0]['Denum'],p[1]['Denum'] = 2, 2\n",
    "\n",
    "    for i in range(doc_n):\n",
    "        #单词出现次数\n",
    "        p[labels[i]]['num']+= docsVector[i]\n",
    "        #单词总数\n",
    "        p[labels[i]]['Denum'] += sum(docsVector[i])\n",
    "    #返回参数\n",
    "    p0V = np.log(p[0]['num']/p[0]['Denum'])\n",
    "    p1V = np.log(p[1]['num']/p[1]['Denum'])\n",
    "    return p0V,p1V,p0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(array([-2.56494936, -2.56494936, -3.25809654, -2.56494936, -2.56494936,\n",
       "        -2.56494936, -2.56494936, -3.25809654, -3.25809654, -2.56494936,\n",
       "        -2.56494936, -2.15948425, -2.56494936, -2.56494936, -2.56494936,\n",
       "        -2.56494936, -3.25809654, -2.56494936, -1.87180218, -3.25809654,\n",
       "        -3.25809654, -2.56494936, -3.25809654, -2.56494936, -3.25809654,\n",
       "        -2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654,\n",
       "        -2.56494936, -3.25809654]),\n",
       " array([-3.04452244, -3.04452244, -2.35137526, -3.04452244, -3.04452244,\n",
       "        -1.94591015, -3.04452244, -2.35137526, -2.35137526, -3.04452244,\n",
       "        -3.04452244, -2.35137526, -3.04452244, -3.04452244, -3.04452244,\n",
       "        -3.04452244, -2.35137526, -3.04452244, -3.04452244, -2.35137526,\n",
       "        -2.35137526, -3.04452244, -2.35137526, -3.04452244, -2.35137526,\n",
       "        -3.04452244, -3.04452244, -2.35137526, -1.65822808, -2.35137526,\n",
       "        -2.35137526, -1.94591015]),\n",
       " 0.5)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "p0V,p1V,p0 = trainBayes(docsVector,labels)\n",
    "p0V,p1V,p0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Classify an unseen sentence (0 = not abusive, 1 = abusive)\n",
    "classify(doc2vector('I love him'.split(' '),docList),p0V,p1V,p0)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.8.9 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.9"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "774a858b74b9162cfeaabab2cd52460f9ad2aea53b492b6366c174bcba156af9"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
