{
 "metadata": {
  "name": ""
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "import math\n",
      "import csv\n",
      "import generatefeedvector as generate\n",
      "import pandas as pd\n",
      "import numpy as np\n",
      "import json\n",
      "\n",
      "\n",
      "#Retorna un diccionari amb la seguent estricutra:\n",
      "#{id_blog:(nom_politic,{paraula1:numAparicions;paraule2:numAparicision,...})}\n",
      "def download_text(df):\n",
      "    dicc={}\n",
      "    invalid_blogs=[]\n",
      "    #recorremos la lista con toda la informacion de todos los blogs\n",
      "    urls=list(df['url'])\n",
      "    for i in df.index:\n",
      "        try:\n",
      "            dicc[i]=generate.getwordcounts(df.ix[i]['url'])\n",
      "            #print \"blog :\", df.ix[i]['nom']\n",
      "        except:\n",
      "            invalid_blogs.append(df.index[i])\n",
      "            #print \"Link incorrecte, blog\", df.ix[i]['nom']\n",
      "    print \"\\nLectura dels blogs finalizada.\\n\",\n",
      "    return dicc,invalid_blogs"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 1
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# CREACIO DEL DATAFRAME \n",
      "unames = ['id_blog', 'partit_politic', 'nom', 'url']\n",
      "df = pd.read_table('blogs.dat',sep='::', header=None, names=unames)\n",
      "\n",
      "# EXEMPLE DE COM BAIXR D'UN CONJUNT DE BLOGS\n",
      "data_blogs,invalid_blogs=download_text(df)\n",
      "# EXEMPLE DE COM ELIMINAR AQUELLS BLOGS QUE NO S'HA POGUT ACCEDIR\n",
      "df=df.drop(invalid_blogs)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "Lectura dels blogs finalizada.\n"
       ]
      }
     ],
     "prompt_number": 2
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# CREACIO DEL DATAFRAME \n",
      "unames = ['id_blog', 'partit_politic', 'nom', 'url']\n",
      "df = pd.read_table('blogs.dat',sep='::', header=None, names=unames)\n",
      "\n",
      "data_blogs = {}\n",
      "invalid_blogs = []\n",
      "\n",
      "with open('data_blogs.json', 'r') as f:\n",
      "\tdata_blogs = json.load(f)\n",
      "\n",
      "with open('invalid_blogs.json', 'r') as f:\n",
      "\tinvalid_blogs = json.load(f)\n",
      "\n",
      "df=df.drop(invalid_blogs)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 103
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 1 </b> Escriure una funci\u00f3 <b>count_blogs(dataframe)</b> que retorni el nombre total de blocs. "
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#Retorna el n\u00famero total de blogs. \n",
      "def count_blogs(df):\n",
      "    return len(df.id_blog.unique())"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 4
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 2:</b> Escriure una funci\u00f3 <b>count_party_blogs(dataframe)</b> que compte quants blogs hi ha per cadascun dels partits."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#Retorna una Series que cont\u00e9 el n\u00famero de blogs per partit pol\u00edtic\n",
      "def count_blogs_party(df):\n",
      "    partits_politics = df.partit_politic.unique()       \n",
      "    series = pd.Series(data = [len(df[df.partit_politic == partit_politic]) for partit_politic in partits_politics],\n",
      "                       index = partits_politics)\n",
      "    return series"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 5
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 3:</b> Escriure una funci\u00f3 <b>count_words(dataframe,data_blogs)</b> que que retorni un diccionari amb totes les paraules que han aparescut en els blogs, indicant la quantitat de cops i el nombre de blogs on ha sortit.\n",
      "<br>Possible format sortida: {word :  {freq: valor;n_cops: valor}}"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Aquesta funci\u00f3 ha de contruir un diccionari que contingui totes les paraules que s'han trobat indicant \n",
      "# el total de cops que ha aparescut i el nombre de blogs on apareix\n",
      "def count_words(df,data_blogs):\n",
      "   data = {}\n",
      "   for blog in data_blogs.values():\n",
      "       # recorremos las palabras del blog\n",
      "       for word in blog[1]:\n",
      "           # Si la palabra no esta en el diccionario, la incluimos\n",
      "           if word not in data:\n",
      "               data[word] = {'freq': 0, 'n_cops': 0}\n",
      "           # sumamos los valores   \n",
      "           data[word]['freq'] += blog[1][word]\n",
      "           data[word]['n_cops'] += 1\n",
      "   return data"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 6
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 4:</b> escriure una funci\u00f3 <b>count_words_party(dataframe,dicc_text)</b> que retorna un diccionari que conte el nombre de cops que ha aparescut cada paraula i el n\u00famero de blogs on  ha aparescut. Aquesta informaci\u00f3 ha de ser dividida en els diferents gruos pol\u00edtics.\n",
      "<br>Possible format sortida: {Partit_potilit :  {word :  {freq: valor;n_cops: valor} } } "
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#Cuenta la frecuencia de las palabras por un partido determinado\n",
      "def count_words_party(df,dicc_text):\n",
      "    \n",
      "    #: Diccionario a retornar tipo enunciado\n",
      "    diccionari = {}\n",
      "\n",
      "    #: Partidos politicos que hay\n",
      "    partits_politics = df.partit_politic.unique()\n",
      "    \n",
      "    for partit_politic in partits_politics:\n",
      "        \n",
      "        #: Por cada partido politico extraemos todos los indices de los blogs que pertenezcan\n",
      "        # a dicho partido.\n",
      "        idxBlogs_partitPolitic = df[df.partit_politic == partit_politic].index\n",
      "        \n",
      "        #: Diccionario que contendra todas las palabras que aparecen en los blogs de dicho partido\n",
      "        diccionari_paraules = {}\n",
      "        \n",
      "        for idxBlog in idxBlogs_partitPolitic:\n",
      "            #: Extraemos las palabras de un determinado blog\n",
      "            dict_paraules = dicc_text[idxBlog][1]\n",
      "            \n",
      "            for paraula, numAparicions in dict_paraules.items():\n",
      "                #: Comprobamos si la palabra ya esta en el diccionario de palabras del partido politico\n",
      "                if (paraula in diccionari_paraules):\n",
      "                    #Sumamos numero de apariciones e incrementamos en 1 el\n",
      "                    #numero de blogs en el que aparece\n",
      "                    diccionari_paraules[paraula]['freq'] += numAparicions\n",
      "                    diccionari_paraules[paraula]['n_cops'] += 1\n",
      "                else:\n",
      "                    #Incluimos palabra inicializada con el numero de apariciones inicial\n",
      "                    #y que ha aparecido una vez\n",
      "                    diccionari_paraules[paraula] = {'freq':numAparicions,'n_cops':1}\n",
      "                                                     \n",
      "        #Al partido politico le asignamos el diccionario de palabras totales de sus blogs.\n",
      "        diccionari[partit_politic] = diccionari_paraules\n",
      "        \n",
      "    return diccionari"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 11
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 5:</b> Calcular amb la funci\u00f3 <b>topNword(df,words_partits,N)</b> quines son les N paraules m\u00e9s representatives (les que apareixen amb m\u00e9s freq\u00fc\u00e8ncia) de cadascun dels partits. Retorneu un diccionari amb els seg\u00fcent format: {PSC: llista_top_words_PSC; ERC: llista_top_words_ERC;...}\n",
      "<br>Teniu en compte que tamb\u00e9 haureu de filtrar aquelles paraules que apareixen en la majoria de blogs, aix\u00ed com tamb\u00e9, les que \u00fanicament apareixen en un conjunt molt petit dels blogs."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "#Cacula les N parules m\u00e9s representativa de cada partit pol\u00edtic. LA sortida ha de \n",
      "# ser un diccionari on tenim tantes entrades com partits politics\n",
      "# el valors de les entrardes ha de ser una llista amb les paraules seleccionades.\n",
      "def topNwords(df,words_partits,N):\n",
      "    #: numero total de blogs\n",
      "    n_blogs = count_blogs(df);\n",
      "    \n",
      "    #: Partidos politicos que hay\n",
      "    partits = df.partit_politic.unique()\n",
      "    \n",
      "    #: numero de blogs per a cada partit\n",
      "    countBlogs_Partit = count_blogs_party(df)\n",
      "    \n",
      "    #: palabras indiferentes que aparecen muy poco o demasiadas veces\n",
      "    paraules_importants = {}\n",
      "    \n",
      "    #Aquellas palabras que aparezcan menos del 10% de 121 blogs, o mas del \n",
      "    #50% de 121 blogs, seran palabras que aparecen en un conjunto muy peque\u00f1o \n",
      "    #de blogs o que aparecen en la mayoria de blogs, respectivamente\n",
      "    for paraula in words_partits:\n",
      "        if float(words_partits[paraula]['n_cops']) / n_blogs >= 0.1 and float(words_partits[paraula]['n_cops']) / n_blogs <= 0.6:\n",
      "            paraules_importants[paraula] = words_partits[paraula]['n_cops']\n",
      "    \n",
      "    #: top N paraules importants per a cada partit politic\n",
      "    paraules_importants_per_partit = {}\n",
      "    \n",
      "    #: paraules de cada partit\n",
      "    paraules_per_partit = count_words_party(df,data_blogs)\n",
      "    \n",
      "    # Recorremos el diccionario de paraules partits, y por cada partido, tan solo mantenemos las palabras\n",
      "    # que estan dentro del diccionario de palabras importantes\n",
      "    for partit in partits:\n",
      "        #: extraemos las palabras de ese partido\n",
      "        paraules_partit = paraules_per_partit[partit]\n",
      "    \n",
      "        paraules_importants_per_partit[partit] = sorted([ ( paraules_partit[paraula]['n_cops'], paraula ) \n",
      "                                                   for paraula in paraules_importants \n",
      "                                                   if paraula in paraules_partit ], reverse=True)[:N]\n",
      "    return paraules_importants_per_partit\n",
      "\n",
      "print topNwords(df,count_words_party(df,dicc_text),20):"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 12
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 6</b>: Creeu el vector de caracter\u00edstiques necessari per a fer l\u2019entrenament del Na\u00efve Bayes (funci\u00f3 <b>create_features()</b>)."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Crea el vector de caracter\u00edsticas necesari per a l'entrenament del classificador Naive Bayes\n",
      "# selected_words: ha de ser el diccionari que obteniu amb la funci\u00f3 topNWords amb les paraules seleccionades\n",
      "# a partir de la funci\u00f3 TopNwords data_blogs : cont\u00e9 el diccionari amb la informaci\u00f3 de cada blog\n",
      "# Rertorna un diccionari que cont\u00e9 un np.array per a cadascun dels blog amb el vector de caracter\u00edstiques\n",
      "# corresponent(mireu l'exemple de l'enunciat)\n",
      "\n",
      "def create_features(data_blogs,top_words):\n",
      "    \n",
      "    diccionari = {}\n",
      "    \n",
      "    #: partidos politicos\n",
      "    partits = df.partit_politic.unique()\n",
      "    \n",
      "    #: N words \n",
      "    values = top_words.values()\n",
      "    all_topWords = list(set([values[i][j][1] for i in range(len(values)) for j in range(len(values[i]))]))\n",
      "\n",
      "    # Iteramos sobre los partidos\n",
      "    for partit in partits:\n",
      "        \n",
      "        #: diccionario de blog-vector caracteristicos de cada partido\n",
      "        diccionari_partit = {}\n",
      "        \n",
      "        #: indices de los blogs del partido\n",
      "        idxBlogs_partit = df[df.partit_politic == partit].index\n",
      "        \n",
      "        for idxBlog in idxBlogs_partit:\n",
      "            \n",
      "            #: todas las palabras del blog\n",
      "            paraules_blog = data_blogs[idxBlog][1].keys()\n",
      "            \n",
      "            #: vector caracteristico inicialmente a 0, y de longitud igual a all_topWords\n",
      "            caracteristic = [0] * len(all_topWords)\n",
      "            \n",
      "            # Por cada palabra de las importantes miramos si aparece en el blog\n",
      "            for idx in range(len(all_topWords)):\n",
      "                \n",
      "                if all_topWords[idx] in paraules_blog:\n",
      "                    #Si aparece, lo indicamos\n",
      "                    caracteristic[idx] = 1\n",
      "            \n",
      "            # Juntamos blog con su vector caracteristico\n",
      "            diccionari_partit[idxBlog] = np.array(caracteristic)\n",
      "            \n",
      "        # Adjuntamos conjunto de blogs junto-vectorCaract a cada partido\n",
      "        diccionari[partit] = diccionari_partit\n",
      "        \n",
      "    return diccionari\n",
      "\n",
      "# Aquest parametre el podem canviar i fer proves per avaluar quin \u00e9s el millor valor\n",
      "N = 20\n",
      "\n",
      "#: {partitPolitic:[palabras mas importantes del partido]}\n",
      "top_words=topNwords(df, count_words(df,data_blogs), N)\n",
      "\n",
      "#: {partitPolitic:[blog:[vectorCaracteristico]]}\n",
      "dict_feat_vector = create_features(data_blogs,top_words)\n",
      "\n",
      "dict_feat_vector['PSC']"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "metadata": {},
       "output_type": "pyout",
       "prompt_number": 14,
       "text": [
        "{0: array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1,\n",
        "       0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0,\n",
        "       0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1,\n",
        "       0, 1, 1]),\n",
        " 1: array([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1]),\n",
        " 2: array([1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1,\n",
        "       0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0,\n",
        "       1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0,\n",
        "       1, 0, 1]),\n",
        " 3: array([0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
        "       0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,\n",
        "       0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1,\n",
        "       0, 0, 0]),\n",
        " 4: array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,\n",
        "       0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n",
        "       1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,\n",
        "       1, 0, 0]),\n",
        " 5: array([1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1,\n",
        "       0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 6: array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0,\n",
        "       0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,\n",
        "       1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
        "       0, 0, 0]),\n",
        " 7: array([0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1,\n",
        "       1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
        "       1, 1, 1]),\n",
        " 8: array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 9: array([1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1,\n",
        "       0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1,\n",
        "       1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1,\n",
        "       1, 0, 1]),\n",
        " 10: array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1]),\n",
        " 11: array([0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0,\n",
        "       1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1,\n",
        "       1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,\n",
        "       0, 1, 1]),\n",
        " 12: array([1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 13: array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 14: array([1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1,\n",
        "       1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,\n",
        "       1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 15: array([0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1]),\n",
        " 16: array([0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
        "       0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 1, 0]),\n",
        " 17: array([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,\n",
        "       1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 18: array([0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 19: array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1,\n",
        "       1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0,\n",
        "       0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1,\n",
        "       0, 0, 0]),\n",
        " 20: array([1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1,\n",
        "       0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,\n",
        "       0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       1, 0, 0]),\n",
        " 21: array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1,\n",
        "       1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1,\n",
        "       0, 0, 1]),\n",
        " 22: array([0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n",
        "       0, 0, 0]),\n",
        " 23: array([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n",
        "       0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 24: array([1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1,\n",
        "       1, 1, 1]),\n",
        " 25: array([0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0,\n",
        "       1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0,\n",
        "       0, 1, 0]),\n",
        " 26: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 27: array([1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0,\n",
        "       0, 0, 1]),\n",
        " 28: array([1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0,\n",
        "       0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0,\n",
        "       0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1,\n",
        "       0, 0, 0]),\n",
        " 29: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 30: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n",
        "       0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,\n",
        "       0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0,\n",
        "       0, 0, 0]),\n",
        " 31: array([0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1,\n",
        "       1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       0, 0, 1]),\n",
        " 32: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,\n",
        "       1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1]),\n",
        " 33: array([0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1,\n",
        "       0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0,\n",
        "       1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0,\n",
        "       0, 1, 0])}"
       ]
      }
     ],
     "prompt_number": 14
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 7</b>: Implementeu la funci\u00f3 d'aprenentatge del classificador Na\u00efve Bayes (funci\u00f3 <b>naive_bayes_learn()</b>).  La funci\u00f3 ha de mostrar el resultat obtingut per pantall\n",
      "<br>\n",
      "<b> * L'error d'entrenament</b>\n",
      "L'error d'entrenament es troba calculant el percentatge d'errors que s'obtenen quan es fa el testeig amb les mateixes dades utilizades per fer entrenament (aprenentatge). Aquest error es un valor molt optimista de com funcionar\u00e0 el clasificador i mai s'ha de prendre com a mesura per comparar clasificadors.\n",
      "<br>"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "def naive_bayes(dict_feat_vector, feature_vector):\n",
      "    #: partidos politicos\n",
      "    partits = dict_feat_vector.keys()\n",
      "    \n",
      "    #: nombre de categories\n",
      "    M = len(partits)\n",
      "    \n",
      "    #: vector de probabilidades del feature_vector por cada partido\n",
      "    vector_probabilitats_condicionals = []\n",
      "    \n",
      "    #: longitud del vector caracteristico\n",
      "    len_caracteristic = len(feature_vector)\n",
      "    \n",
      "    # Lo primero que hemos de hacer es contar cuantas veces aparece cada caracteristica/palabra\n",
      "    # en cada uno de los partidos y pasarlo a probabilidades P(A|B), P(par|partit)\n",
      "    for partit in partits:\n",
      "        \n",
      "        #: indices de los blogs del partido\n",
      "        idxBlogs_partit = dict_feat_vector[partit].keys()\n",
      "        \n",
      "        #: numero de blogs del partido\n",
      "        B = len(idxBlogs_partit)\n",
      "        \n",
      "        #: P(word(i)|partido)\n",
      "        diccionario = {}\n",
      "        \n",
      "        for i in range(len_caracteristic):\n",
      "            \n",
      "            #. numero de veces que aparece word(i), en un partido\n",
      "            A = 0;\n",
      "            \n",
      "            for idxBlog in idxBlogs_partit:\n",
      "            \n",
      "                #: word(i)\n",
      "                wi = dict_feat_vector[partit][idxBlog][i]\n",
      "                if wi == 1:\n",
      "                    A += 1\n",
      "        \n",
      "            #: P(word(i)|partido), probabilidad que la palabra salga en el partido\n",
      "            if A == 0:\n",
      "                diccionario[i]  = float(1) / float(B + M)\n",
      "            else:\n",
      "                diccionario[i]  = float(A) / float(B)\n",
      "            #diccionario[i]  = float(A + 1) / float(B + M)\n",
      "        \n",
      "        #Calculamos la probabilitat condicionada del feature_vector con el partido\n",
      "        Probabilitat = 0\n",
      "        \n",
      "        #Calculamos P(w1, w2, .., wn| partido)\n",
      "        for i in range(len_caracteristic):\n",
      "            if feature_vector[i] == 1:\n",
      "                Probabilitat += log(diccionario[i])\n",
      "            else:\n",
      "                Probabilitat += log(1 - diccionario[i])\n",
      "        \n",
      "        vector_probabilitats_condicionals.append((Probabilitat, partit))\n",
      "\n",
      "    return max(vector_probabilitats_condicionals)[1]"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 72
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from math import log\n",
      "\n",
      "def naive_bayes_learn(df, dict_feat_vector):\n",
      "    #: partidos politicos\n",
      "    partidos = dict_feat_vector.keys()\n",
      "    \n",
      "    #: aciertos totales\n",
      "    aciertos_totales = 0\n",
      "    \n",
      "    # Iteramos sobre los partidos\n",
      "    for partido in partidos:\n",
      "        \n",
      "        #: indices de los blogs del partido\n",
      "        idxBlogs_partido = dict_feat_vector[partido].keys()\n",
      "        \n",
      "        #: numero de aciertos coincidentes al blog\n",
      "        aciertos_partido = 0\n",
      "        \n",
      "        for idxBlog in idxBlogs_partido:\n",
      "        \n",
      "            #: vector caracteristico de cada blog\n",
      "            feature_vector = dict_feat_vector[partido][idxBlog]\n",
      "            \n",
      "            #: naive bayes con el vector caracteristico del blog\n",
      "            M = naive_bayes(dict_feat_vector, feature_vector)\n",
      "            \n",
      "            # Si el maxim retornado de las probabilidades condicionadas es \n",
      "            # el mismo partido al que pertenecia, hemos acertado en la clasificacion\n",
      "            if M == partido:\n",
      "                aciertos_partido += 1\n",
      "                \n",
      "        print partido, \"encerts:\", aciertos_partido, \"/\",\n",
      "        print \"blogs:\", len(idxBlogs_partido), \"/\",\n",
      "        print \"%.2f\" %((float(aciertos_partido)/ float(len(idxBlogs_partido))) * 100),\"%\"\n",
      "    \n",
      "        aciertos_totales += aciertos_partido\n",
      "        \n",
      "    error = (1 - (float(aciertos_totales) / float(len(df.id_blog.unique()))))*100\n",
      "    \n",
      "    print \n",
      "    print \"Error naive bayes: %.2f\" %(error),\"%\"\n",
      "    \n",
      "    return error\n",
      "## EXEMPLE SORTIDA:\n",
      "#PSC encerts: 24 / blogs: 34 / 70.59 %\n",
      "#ICV encerts: 21 / blogs: 26 / 80.77 %\n",
      "#CIU encerts: 29 / blogs: 32 / 90.62 %\n",
      "#ERC encerts: 26 / blogs: 29 / 89.66 %\n",
      "#\n",
      "#Error naive bayes: 17.36 %\n",
      "\n",
      "\n",
      "#n,error = -1,float(\"inf\")\n",
      "#for i in range(20,30):\n",
      "#    err = naive_bayes_learn(df, create_features(data_blogs,topNwords(df,count_words(df,data_blogs),i)))\n",
      "#    if err < error:\n",
      "#        n = i\n",
      "#        error = err\n",
      "#print n, error\n",
      "N = 13\n",
      "naive_bayes_learn(df, create_features(data_blogs,topNwords(df,count_words(df,data_blogs),N)))"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "PSC encerts: 18 / blogs: 34 / 52.94 %\n",
        "ICV"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 20 / blogs: 25 / 80.00 %\n",
        "CIU"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 15 / blogs: 32 / 46.88 %\n",
        "ERC"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 23 / blogs: 29 / 79.31 %\n",
        "\n",
        "Error naive bayes: 36.67 %\n"
       ]
      },
      {
       "metadata": {},
       "output_type": "pyout",
       "prompt_number": 118,
       "text": [
        "36.66666666666667"
       ]
      }
     ],
     "prompt_number": 118
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 8: </b> Indiqueu l'error de generalitzaci\u00f3 fent servir Leave-one-out (funci\u00f3 <b>leave1out ()</b> )\n",
      "\n",
      "<b> * Aproximaci\u00f3 a l'error de generalitzaci\u00f3 fent servir Leave-one-out</b>\n",
      "Una bona forma de veure com funcionaria el nostre classificador davant de dades sobre les quals no s'ha entrenat \u00e9s fer servir l'estrat\u00e8gia leave-one-out. Aquesta estrat\u00e8gia entrena el classificador amb totes les dades d'entrenament menys amb una i fa el testeig sobre la dada que hem excl\u00f2s de l'entrenament. Aquest proc\u00e9s d'exclusi\u00f3 es repeteix per cadascuna de les dades d'entrenament. El percentatge d'errors fent servir aquesta estrat\u00e8gia permet comparar classificadors."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Evaluate the classifier with the leave-one-out technique and print the\n",
      "# per-party results.\n",
      "def leave1out(df, dict_feat_vector):\n",
      "    \"\"\"Estimate the generalization error with leave-one-out.\n",
      "\n",
      "    For every blog, its feature vector is removed from the training data,\n",
      "    the blog is classified with naive_bayes on the remaining data, and the\n",
      "    vector is restored afterwards.\n",
      "\n",
      "    df               -- DataFrame with the blog metadata\n",
      "    dict_feat_vector -- {party: {blog_id: feature_vector}}\n",
      "    Returns the leave-one-out error as a percentage.\n",
      "    \"\"\"\n",
      "    #: political parties\n",
      "    partidos = dict_feat_vector.keys()\n",
      "    \n",
      "    #: total number of correctly classified blogs\n",
      "    aciertos_totales = 0\n",
      "    \n",
      "    #: sum of the per-party accuracy percentages\n",
      "    accuracy_total = 0.0\n",
      "    \n",
      "    # Iterate over the parties\n",
      "    for partido in partidos:\n",
      "        \n",
      "        #: ids of the blogs that belong to this party\n",
      "        idxBlogs_partido = dict_feat_vector[partido].keys()\n",
      "        \n",
      "        #: correctly classified blogs of this party\n",
      "        aciertos_partido = 0\n",
      "        \n",
      "        for idxBlog in idxBlogs_partido:\n",
      "        \n",
      "            #: feature vector of the held-out blog\n",
      "            feature_vector = dict_feat_vector[partido][idxBlog]\n",
      "            \n",
      "            # LEAVE ONE OUT: remove the blog from the training data\n",
      "            del(dict_feat_vector[partido][idxBlog])\n",
      "            \n",
      "            #: classify the held-out blog using the remaining data\n",
      "            M = naive_bayes(dict_feat_vector, feature_vector)\n",
      "            \n",
      "            # PUT IT BACK: restore the blog so later iterations train on the\n",
      "            # full data set. Without this restore the training data shrinks\n",
      "            # on every iteration, the printed denominators become wrong and\n",
      "            # naive_bayes eventually raises 'ValueError: math domain error'\n",
      "            # when a word probability degenerates to 0 or 1.\n",
      "            dict_feat_vector[partido][idxBlog] = feature_vector\n",
      "            \n",
      "            # Correct when the predicted party matches the true one\n",
      "            if M == partido:\n",
      "                aciertos_partido += 1\n",
      "                \n",
      "        accuracy_partido = ((float(aciertos_partido)/ float(len(idxBlogs_partido))) * 100)\n",
      "                \n",
      "        print partido, \"encerts:\", aciertos_partido, \"/\",\n",
      "        print \"blogs:\", len(idxBlogs_partido), \"/\",\n",
      "        print \"%.2f\" %(accuracy_partido),\"%\"\n",
      "    \n",
      "        accuracy_total += accuracy_partido\n",
      "        aciertos_totales += aciertos_partido\n",
      "        \n",
      "    # Average per-party accuracy turned into an error percentage.\n",
      "    # 100.0 * len(partidos) replaces the hard-coded 400 (= 100 * 4 parties)\n",
      "    # so the function keeps working if the number of parties changes.\n",
      "    error_bayes = (1 - (accuracy_total / (100.0 * len(partidos)))) * 100\n",
      "    \n",
      "    print \n",
      "    print \"Error naive bayes: %.2f\" %(error_bayes),\"%\"\n",
      "    \n",
      "    return error_bayes\n",
      "    \n",
      "## EXAMPLE OUTPUT:\n",
      "#PSC encerts: 24 / blogs: 34 / 70.59 %\n",
      "#ICV encerts: 21 / blogs: 26 / 80.77 %\n",
      "#CIU encerts: 29 / blogs: 32 / 90.62 %\n",
      "#ERC encerts: 26 / blogs: 29 / 89.66 %\n",
      "#\n",
      "#Error naive bayes: 17.36 %\n",
      "\n",
      "\n",
      "# Grid search kept for reference: picks the vocabulary size N with the\n",
      "# lowest leave-one-out error.\n",
      "#n,error = -1,float(\"inf\")\n",
      "#for i in range(10,20):\n",
      "#    err = leave1out(df, create_features(data_blogs,topNwords(df,count_words(df,data_blogs),i)))\n",
      "#    if err < error:\n",
      "#        n = i\n",
      "#        error = err\n",
      "#print n, error\n",
      "N = 13\n",
      "leave1out(df, create_features(data_blogs,topNwords(df,count_words(df,data_blogs),N)))"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "ename": "ValueError",
       "evalue": "math domain error",
       "output_type": "pyerr",
       "traceback": [
        "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
        "\u001b[1;32m<ipython-input-128-21b62b5db7c8>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m     71\u001b[0m \u001b[1;31m#print n, error\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     72\u001b[0m \u001b[0mN\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m13\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 73\u001b[1;33m \u001b[0mleave1out\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcreate_features\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata_blogs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtopNwords\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcount_words\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdata_blogs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mN\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
        "\u001b[1;32m<ipython-input-128-21b62b5db7c8>\u001b[0m in \u001b[0;36mleave1out\u001b[1;34m(df, dict_feat_vector)\u001b[0m\n\u001b[0;32m     28\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     29\u001b[0m             \u001b[1;31m#: naive bayes con el vector caracteristico del blog\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 30\u001b[1;33m             \u001b[0mM\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnaive_bayes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdict_feat_vector\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeature_vector\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     31\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     32\u001b[0m             \u001b[1;31m# GET IN ONE\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
        "\u001b[1;32m<ipython-input-72-19c42abb745f>\u001b[0m in \u001b[0;36mnaive_bayes\u001b[1;34m(dict_feat_vector, feature_vector)\u001b[0m\n\u001b[0;32m     52\u001b[0m                 \u001b[0mProbabilitat\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mlog\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdiccionario\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     53\u001b[0m             \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 54\u001b[1;33m                 \u001b[0mProbabilitat\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mlog\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m \u001b[1;33m-\u001b[0m \u001b[0mdiccionario\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     55\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     56\u001b[0m         \u001b[0mvector_probabilitats_condicionals\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mProbabilitat\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpartit\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
        "\u001b[1;31mValueError\u001b[0m: math domain error"
       ]
      }
     ],
     "prompt_number": 128
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "<b>Exercici 9</b> Definici\u00f3 de la funci\u00f3 principal. Modifiqueu la funci\u00f3 per tal que s'ajusti a les vostres funcions."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Entry point: builds the feature vectors and evaluates the classifier\n",
      "# both on the training data and with leave-one-out.\n",
      "def main():\n",
      "    \"\"\"Run the full pipeline on the globals df and data_blogs.\n",
      "\n",
      "    The download step below is kept for reference only; the notebook\n",
      "    already downloaded the blogs into data_blogs in an earlier cell.\n",
      "\n",
      "    unames = ['id_blog', 'partit_politic', 'nom', 'url']\n",
      "    df = pd.read_table('blogs.dat',sep='::', header=None, names=unames)\n",
      "    data_blogs,invalid_blogs=download_text(df)\n",
      "    df=df.drop(invalid_blogs)\n",
      "    \"\"\"\n",
      "    \n",
      "    # Word frequencies that appear in each blog, grouped by party\n",
      "    word_counts = count_words(df, data_blogs)\n",
      "    \n",
      "    # Vocabulary: the N most representative words of each party\n",
      "    N = 13  # tunable; try other values to evaluate which works best\n",
      "    top_words = topNwords(df, word_counts, N)\n",
      "    \n",
      "    # Show the selected vocabulary per party, e.g.\n",
      "    # PSC -> [u'los', u'por', u'las', u'para', u'mas', u'psc', ...]\n",
      "    # ICV -> [u'los', u'mso', u'las', u'sobre', u'icv', u'social', ...]\n",
      "    # CIU -> [u'ciu', u'ciutat', u'nostre', u'barcelona', u'tant', ...]\n",
      "    # ERC -> [u'pais', u'esquerra', u'sobre', u'social', u'nostre', ...]\n",
      "    for party, words in top_words.items():\n",
      "        print party\n",
      "        print words\n",
      "    \n",
      "    print\n",
      "    \n",
      "    print \"--------\\n\"\n",
      "    \n",
      "    # Feature vector for every blog\n",
      "    feature_vectors = create_features(data_blogs, top_words)\n",
      "    \n",
      "    # Evaluate the classifier on its own training data\n",
      "    # (optimistic estimate; expected error around 17%)\n",
      "    naive_bayes_learn(df, feature_vectors)\n",
      "    \n",
      "    print \"--------\\n\"\n",
      "    \n",
      "    # Evaluate with leave-one-out for a better generalization estimate\n",
      "    # (expected error around 30%)\n",
      "    leave1out(df, feature_vectors)\n"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 120
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Run the whole pipeline\n",
      "main()"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "PSC\n",
        "[(27, u'socialista'), (25, u'comen'), (24, u'socialistes'), (24, u'ncia'), (23, u'congres'), (23, u'confian'), (22, u'tica'), (22, u'para'), (22, u'ning'), (22, u'for'), (22, u'democr'), (21, u'las'), (21, u'gestio')]\n",
        "ICV\n",
        "[(23, u'icv'), (23, u'euros'), (23, u'democr'), (22, u'cia'), (21, u'ries'), (21, u'mesures'), (21, u'local'), (21, u'comen'), (21, u'blic'), (20, u'tracta'), (20, u'sol'), (20, u'pugui'), (20, u'ning')]\n",
        "CIU\n",
        "[(21, u'unio'), (21, u'porta'), (20, u'tingui'), (20, u'sota'), (20, u'pogut'), (20, u'paraules'), (20, u'necessari'), (20, u'manca'), (20, u'estic'), (20, u'comen'), (20, u'alcalde'), (19, u'trobem'), (19, u'prendre')]\n",
        "ERC\n",
        "[(25, u'llibertat'), (25, u'independencia'), (25, u'espanyol'), (24, u'ningu'), (24, u'esta'), (23, u'gairebe'), (22, u'viure'), (22, u'sera'), (22, u'present'), (22, u'popular'), (22, u'madrid'), (22, u'entendre'), (22, u'discurs')]\n",
        "\n",
        "--------\n",
        "\n",
        "PSC"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 18 / blogs: 34 / 52.94 %\n",
        "ICV"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 20 / blogs: 25 / 80.00 %\n",
        "CIU"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 15 / blogs: 32 / 46.88 %\n",
        "ERC"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 23 / blogs: 29 / 79.31 %\n",
        "\n",
        "Error naive bayes: 36.67 %\n",
        "--------\n",
        "\n",
        "PSC"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 14 / blogs: 34 / 42.42 %\n",
        "ICV"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 20 / blogs: 25 / 83.33 %\n",
        "CIU"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 13 / blogs: 32 / 41.94 %\n",
        "ERC"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        " encerts: 23 / blogs: 29 / 82.14 %\n",
        "\n",
        "Error naive bayes: 37.54 %\n"
       ]
      }
     ],
     "prompt_number": 121
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}