{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from collections import defaultdict\n",
    "from functools import reduce\n",
    "import math\n",
    "\n",
    "class NaiveBayes:\n",
    "    \"\"\"Multinomial Naive Bayes classifier with Laplace smoothing.\n",
    "\n",
    "    Features and labels are arbitrary hashable values. Training data is a\n",
    "    list of feature lists (dataSet_x) plus a parallel list of labels\n",
    "    (dataSet_y).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "\n",
    "        # freqFeature[feature] = occurrences over the whole training set\n",
    "        self.freqFeature = defaultdict(int)\n",
    "\n",
    "        # freqLabel[label] = number of training rows with that label (class)\n",
    "        self.freqLabel = defaultdict(int)\n",
    "\n",
    "        # condFreqFeature[label][feature] = occurrences of feature in rows of that label\n",
    "        self.condFreqFeature = defaultdict(lambda: defaultdict(int))\n",
    "\n",
    "    def countFrequencies(self):\n",
    "        \"\"\"Count global feature frequencies and per-label row counts.\"\"\"\n",
    "        # Flatten the feature rows into one stream of features\n",
    "        allFeatures = reduce(lambda x, y: x + y, self.dataSet_x)\n",
    "\n",
    "        for f in allFeatures:\n",
    "            self.freqFeature[f] += 1\n",
    "\n",
    "        for l in self.dataSet_y:\n",
    "            self.freqLabel[l] += 1\n",
    "\n",
    "    def countCondFrequencies(self):\n",
    "        \"\"\"Count feature frequencies conditioned on each label.\"\"\"\n",
    "        for features, label in zip(self.dataSet_x, self.dataSet_y):\n",
    "            for f in features:\n",
    "                # condFreqFeature[label][feature]\n",
    "                self.condFreqFeature[label][f] += 1\n",
    "\n",
    "    def train(self, dataSet_x, dataSet_y):\n",
    "        \"\"\"Fit the model on parallel lists of feature rows and labels.\"\"\"\n",
    "        self.dataSet_x = dataSet_x\n",
    "        self.dataSet_y = dataSet_y\n",
    "\n",
    "        self.countFrequencies()\n",
    "        self.countCondFrequencies()\n",
    "\n",
    "    def probLikelihood(self, f, l, vocabulary):\n",
    "        \"\"\"Laplace-smoothed likelihood P(f | l) = (count(f, l) + 1) / (count(l) + |V|).\"\"\"\n",
    "        laplace = 1\n",
    "\n",
    "        condFreq = self.condFreqFeature[l][f]\n",
    "        # float() keeps the division exact under Python 2 as well\n",
    "        prob = float(condFreq + laplace) / (self.freqLabel[l] + vocabulary)\n",
    "\n",
    "        return prob\n",
    "\n",
    "    def predict(self, dataSet_x):\n",
    "        \"\"\"Return, for each row of dataSet_x, a dict mapping label -> score.\n",
    "\n",
    "        score(l) = exp(log P(l) + sum_f log P(f | l)); the sum is kept in\n",
    "        log space to avoid floating point underflow.\n",
    "        \"\"\"\n",
    "        probs = []\n",
    "        totalTuples = len(self.dataSet_y)\n",
    "        vocabulary = len(self.freqFeature)\n",
    "\n",
    "        # Distinct labels only: the original looped over every training label,\n",
    "        # recomputing the exact same score once per duplicate\n",
    "        labels = set(self.dataSet_y)\n",
    "\n",
    "        # Each row to classify\n",
    "        for index, t in enumerate(dataSet_x):\n",
    "            probs.append(defaultdict(float))\n",
    "\n",
    "            # Each candidate label\n",
    "            for l in labels:\n",
    "                prob = 0.0\n",
    "\n",
    "                # Each feature of the row\n",
    "                for f in t:\n",
    "                    prob += math.log(self.probLikelihood(f, l, vocabulary))\n",
    "\n",
    "                # Bug fix: the class prior must be added in log space. The\n",
    "                # original added the raw ratio, which under Python 2 integer\n",
    "                # division was always 0, silently dropping the prior.\n",
    "                prob += math.log(float(self.freqLabel[l]) / totalTuples)\n",
    "\n",
    "                probs[index][l] = math.exp(prob)\n",
    "\n",
    "        return probs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.805394990366\n"
     ]
    }
   ],
   "source": [
    "import random\n",
    "import math\n",
    "\n",
    "# Car dataset\n",
    "# Attribute Information:\n",
    "#\n",
    "# Class Values:\n",
    "#\n",
    "# unacc, acc, good, vgood\n",
    "#\n",
    "# Attributes:\n",
    "#\n",
    "# buying: vhigh, high, med, low.\n",
    "# maint: vhigh, high, med, low.\n",
    "# doors: 2, 3, 4, 5more.\n",
    "# persons: 2, 4, more.\n",
    "# lug_boot: small, med, big.\n",
    "# safety: low, med, high.\n",
    "\n",
    "# Return the dataset: a list of rows, each row a list of suffixed feature strings\n",
    "def readFile(path):\n",
    "    # Suffixes tag each feature value with its column, so that frequency\n",
    "    # counting does not confuse equal values coming from different features\n",
    "    suffix = ['_buy', '_maint', '_doors', '_pers', '_lug', '_safety', '_class']\n",
    "\n",
    "    dataset = []\n",
    "\n",
    "    # 'with' guarantees the file handle is closed (the original leaked it)\n",
    "    with open(path, 'r') as rawDataset:\n",
    "        for line in rawDataset:\n",
    "            l = line.split(',')\n",
    "            # Strip the newline character from the last field\n",
    "            l[-1] = l[-1].replace(\"\\n\", \"\")\n",
    "            # List comprehension instead of map(lambda (x, y): ...), whose\n",
    "            # tuple-unpacking lambda is Python-2-only syntax; the result is\n",
    "            # the same list of strings\n",
    "            newTuple = [value + tag for value, tag in zip(l, suffix)]\n",
    "            dataset.append(newTuple)\n",
    "\n",
    "    return dataset\n",
    "\n",
    "def main():\n",
    "\n",
    "    preparedDataset = readFile('carData')\n",
    "\n",
    "    # Shuffle on every run so each execution gets a fresh train/test split\n",
    "    random.shuffle(preparedDataset)\n",
    "\n",
    "    # dataSet_x[i] holds the feature list of row i; dataSet_y[i] its class,\n",
    "    # so the two lists stay parallel by construction\n",
    "    dataSet_x = []\n",
    "    dataSet_y = []\n",
    "\n",
    "    for t in preparedDataset:\n",
    "        dataSet_x.append(t[:-1])\n",
    "        dataSet_y.append(t[-1])\n",
    "\n",
    "    nTuples = len(dataSet_x)\n",
    "\n",
    "    # 70% of the rows train the model, the remaining 30% test it\n",
    "    nToTrain = int(math.floor(nTuples * 0.7))\n",
    "\n",
    "    dataSet_x_train = dataSet_x[:nToTrain]\n",
    "    dataSet_y_train = dataSet_y[:nToTrain]\n",
    "\n",
    "    dataSet_x_test = dataSet_x[nToTrain:]\n",
    "    dataSet_y_test = dataSet_y[nToTrain:]\n",
    "\n",
    "    # Train the classifier: naive.train(features, labels)\n",
    "    naive = NaiveBayes()\n",
    "    naive.train(dataSet_x_train, dataSet_y_train)\n",
    "\n",
    "    accuracy = 0.0\n",
    "\n",
    "    # Classify the held-out rows (features only)\n",
    "    results = naive.predict(dataSet_x_test)\n",
    "\n",
    "    # Score the model: count how many test rows were predicted correctly\n",
    "    for index, r in enumerate(results):\n",
    "        yPredicted = max(r, key=r.get)\n",
    "        y = dataSet_y_test[index]\n",
    "\n",
    "        if y == yPredicted:\n",
    "            accuracy += 1.0\n",
    "\n",
    "    # Print as a function call so the cell also runs under Python 3\n",
    "    print(accuracy / len(dataSet_y_test))\n",
    "\n",
    "main()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
