{
 "metadata": {
  "name": "",
  "signature": "sha256:6031a1a9e040a5f3ca039015076c5e60e4b58dc643c9027163dd1eea7a4a8d68"
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "heading",
     "level": 1,
     "metadata": {},
     "source": [
      "Image Processing"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
       "En esta secci\u00f3n trabajaremos con clasificaci\u00f3n de im\u00e1genes. Cada instancia a clasificar es una imagen con el rostro de una persona. El objetivo es asignar el nombre de la persona con el rostro correcto. Para eso utilizaremos un dataset de *sklearn.datasets* que contiene im\u00e1genes de rostros con sus correspondientes etiquetas. Cada imagen se representa como un vector de pixeles.\n",
      "\n",
      "Utilizar la funci\u00f3n *fetch_lfw_people* para importar los datos de las personas de las que se tiene al menos 50 im\u00e1genes de su rostro. Inspeccionar su contenido (data, target y target_names), renderear el rostro de la primera instancia del dataset:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "%matplotlib inline\n",
      "import matplotlib\n",
      "import matplotlib.pyplot as pl\n",
      "\n",
      "import pylab as pl\n",
      "from sklearn.cross_validation import train_test_split\n",
      "from sklearn.datasets import fetch_lfw_people\n",
      "import time\n",
      "\n",
      "# Inicio cuenta de tiempo\n",
      "start_time = time.clock()\n",
      "\n",
      "# Obtengo datos de las personas con al menos 50 imagenes de su rostro\n",
      "lfw_people = fetch_lfw_people(min_faces_per_person=50, resize=0.4)\n",
      "\n",
      "# Extraigo informacion de la lista\n",
      "n_samples, h, w = lfw_people.images.shape\n",
      "X = lfw_people.data\n",
      "n_features = X.shape[1]\n",
      "y = lfw_people.target\n",
      "y_nombres = lfw_people.target_names\n",
      "target_names = lfw_people.target_names\n",
      "target = lfw_people.target\n",
      "data = lfw_people.data\n",
      "n_classes = target_names.shape[0]\n",
      "\n",
      "# Dibujo\n",
      "pl.figure(figsize=(1.8 * 4, 2.4 * 3))\n",
      "pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n",
      "pl.subplot(3, 4, 1)\n",
      "pl.imshow(X[0].reshape((h, w)), cmap=pl.cm.gray)\n",
      "pl.xticks(())\n",
      "pl.yticks(())\n",
      "pl.show()\n",
      "\n",
      "print 'Target names: '\n",
      "print '-------------'\n",
      "print target_names\n",
      "print '\\nTarget: '\n",
      "print '-------'\n",
      "print target\n",
      "print '\\nData: '\n",
      "print '-----'\n",
      "print data"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "metadata": {},
       "output_type": "display_data",
       "png": "iVBORw0KGgoAAAANSUhEUgAAAGsAAACMCAYAAAB7/lc2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztneluG0uShaMoUtxJSb7XjWt3v1M/cT9Mo2W0LUoWxU1c\na34YJ/XVUZYW0oMZA06AkEQWqzLjxHIiclFRlmX8br9Ga/xfd+B3e3v7DdYv1H6D9Qu132D9Qu03\nWL9Q+w3WL9SaL31YFMVvXv9/1MqyLPy9F8GKiPjnP/8ZjUYjiqKIVqsV3W432u12nJ+fR6vVit1u\nF/P5PBaLRbRarWi329HpdKLf70e/34+iKKIsyyiKIjqdTvpus9l89jo7O4uzs7MoiiJ9b71ex2az\nicfHx3h8fIzNZhP7/T72+30cDocoyzL2+308Pj7GarWKzWaTXofDIfb7fWy321iv1/H4+BgREUVR\nRKPRiPPz82i326lP7XY7zs7OKn05OztLsmg0GqmvksnhcEj3Zr84hsPhEIfDIfVrsVjEw8NDTKfT\n2O12sd/vIyKi0+lEp9OJf/3rX1ksXgXr/Pw8PUwD1QAkjP1+H7vdLprNZrpGAimKIg00Jwi9JIRW\nq5Wevd/vKwPWcwiUknr1q9FopJf6zGsIlvrHvuoaNf+uP1PjarVa6fv7/T5d81rRYb/fx3q9jrIs\nkzLUtVfB8gFJkyVICU/XcMCHw6EiFL0nweueFIJA0EDUBwGq7xEwPUfX6Bn8TJ+r5YDxRhB1DykV\nv+9KqnGq//xM/VBfN5tN6udut4vNZnM8WHqATL8sy+RiBJwLRNfJ2jiA3W6Xfpc2Sbh0CdTgVqtV\nAXG328Vut6sIxK2Ulnc4HKLZbFaUwS2Fz+O4dU8qQ6vVqnyHbpH3ZAhQH9UX3UfflddYr9eng+Wu\nT4KVwAmUOrnf79P77s5SB2AJdFsapAs/ItLzBLz3QQLRs/QevULdWDnmnOuWoHVvPVvxK+dOaVV0\nmwQrImr79S6wJGjdkLHKLU/Cy92DA6cCqJN0i+4iHUQXSp11EAA1XqM+nZ+fJ+Lj/aQH2e12yf1v\nt9sEGIkEXTRDQ85S9Szd92Sw1Oiu9LuERrAokDqCIS3TfXWNfr4GhsdCXuP99lias1wxWQrTvQRB\nEbPbbrdZJco9m+SnjnW+REjeBBY7Sa3xzuQ02ZmXOsSYQjemgeg9NX8un+3f0zPchfKZRVEka5JF\nOYFin9l3Wgll42A5gKT0VGK93DMcDZYGIKFRWOoYwWGnmD9RS9VZxjp3FRpALtbkqDMH70qj5wmY\nbrcb3W43zs/Pk2aLvHA8jC/6brvdrrg+Z6CMzexLzlOo3z8lZlFY7v5eMvuc8HxwYmkckKxBTDFn\niQ527tme68jaIn7Eqm63G51Op2JV2+02Hh8fn7lmXSOXSQVjrrndbhPgZIO6js1j2skxSw+uc0N6\nqGuXLMpdibuIl/y5rqOiUDD8m9eov7RG9cd/Z7ogZTo7O6tUN1qtVpyfnz8DlM+QnDzvJEX35P/8\n/Dx2u120Wq1KynI0WG5ROUETLDdtB8uTYKf9ZEjK1Vwom80m1uv1M5A8UXZXI1dMl0yCoJxJVjcY\nDGI4HKY+qqwlsFarVazX62dAHQ6HBAYtV2OSW3X3KsU8Gix3WxJ6LtDnzNkZmn/u+YoSaQVjxrcc\nZedz2B/GO1ceKYYrYEQ8q1XqM7cguTv3OD5WysBzRVqch4ujwMo9iE1CyBVjGWhzhVGmAnyGNI5F\nXbpF5iYs1joxcGHKxbHiQDfsLvrx8THW63XFgzAeqQCsRit3hiqaz+8TKPbjZLBoIbkgT42sIxO6\nVt+j65LAt9tthVIrbpAhyufTDW6321Rn08B1PyoD4wZdrn7X9aqkPz4+PrNmudJ2u53GqyqJ+uOs\ncLPZVBLpiEjPzYWJo8DyWJR73xspO2MagT0
/P6/EJMYmtv1+H8vlsqLdJBh6CRjGwVyVgaUiKV7O\nsjSObrf7TAk97+R0DGueBJD1TCq5mhvAUWBxoLyZsz9vuURYAiDLova74EUkNIfFZ/o0ie4R8VTU\ndQVgjkfXSQYrYNyyZd26dr1ex3q9rpAN9U0Jd7vdjogqo/bSmhTwJZDeDFYOjFwcy5EM70yO2tNF\nqjE+qA8+l+VBPfeeJ/GudLmUQ8+nJ8iVvugqBZqu96SecuQzvO/s61FguQD5AHbGmZZTdg7wcPgx\nu3p2dpZAiKhahDS02Wym2ODWpb5EVKdN2GcJXn1heandbmcpvhTtcDik2We5dCnAarWK5XIZ2+22\nkpdpNlx9kxum0rpC0V1LFkeB5ZbDwbk2kn7mqLU6ojkb3kMlI8UyuiXSbDI73tvjngDSdwmWXLAH\ndFojczqPUcq35PqGw2F0Op0YDocxHo+j1+sl9y1AczRe4BOsk/IsN9tcIsz3eW1ElYjQRTAJ5ne3\n222FAIiQcKB1McxLPh7waeGc7c3lRB7feH8mvmdnZ9Hr9aLX68XZ2VkiG6vVKiXNYoIaj5rH85yS\nvxus3A1ycSLXDodDEq6sjtot4GgdEnhRFJUKgCxbwmLewrgmAeVcm+7LMdGF09p0jXJB9o2Lh/r9\nfvR6vYiIVOFYLpexXC4rE6Re0OYzfgpYMtecz2X1Qc0Dpwa33W4rGbootNgXn5cjAGqiymJjTon5\nPPaJuR8bqX7dy8mBqg9ajaSqPZnhYrGIxWJRSZC1IsyTYP5+Up6lbNvBonBy0yb6nNRc2su1BkVR\npLKLfhel173n83kCgcvS+GzXSMUkgpVLJ6hoVDB9R1avvnU6nWcVGxEO9c2rFVS0siyfLcX7aeWm\n3HQzNZ5rMSKeTJmuSsFTpSYlr/L70m65ReZY0tDlchmLxSKBROYnF0MhEqyctXhK4ZWQiHgWXzWb\n3G63K9+ne1S/BZbnclIAFnFzDPoosDwI+mc5txcRlaBMqup5xXK5TO5Q2sypDgXsl2g8XRktO0eA\ncmCxfxKsrtH1tASttvLpGY9rVFSW07Q2kksKXlov+GawZOa5QZNlRTwF4lxC6kFd18tiPGF0ny6r\nI0i5OSt+RmFRwdzVEBSSEoFEC/MZby8UEyixQI+nUmCf3zo5KSYQEc8XfXrh1oWZs0gNVMmx3Ia+\nlyv5MA/T4EmlmRwz3klgLzFW3VtjcWuim6Lb1LNyypOrtEhGVGAqhOeP7waLAyeAFCinM8gUcw+W\n9uuey+UyVqtVEqgCMKvqh8OhEjt0H1kAp11yZZyXyj4RVQWUlnNSUJ8JoNVqVesCBZ7YH92b7qsx\nSMkUx062LPpcshWWWLzU7/SbQtSgxZwWi0ViexLKbrerrLEXWLmlWzn668/mKiRnr7oHwcrlgBFR\nmaVWY77nSbjmutg31gy9qKv7HQ2WayFNlztCcpTT45QGtl6vU4Yvyitq71UFWaFP+3PgHk98rYW7\nZnoKCpGLYRiX9N3tdhur1SoeHx8rY3OKzqkU9cHHRrDk3kWmTgKLgyL1VNGS69n9ev1OrV6tVjGf\nz1OGT1IiyxNBkBXSghhTuJZBi1oofC9JOe33cbnmE7DNZpMKuFQWeh66alkY11h4QaHR+LGsTYrw\n08CiFko47Xb7WX5ANqUmgWlaQVaVe54H8br6JPMeXcNlYjnmRsty1knlIqPU76yccHkA4zMtSH/7\nzLbGydjW6XQqsfcosPzGGoxiFvcl5YK2AyCtbjab0el0nrnKsiwrDCxHCFholfuUQqhI6+tBPCHO\nuVzdg0yPNUb1rdvtVsAlPXf3WpfoMkZqXo/TK0eBlavRuQuqi2sUIiseIgwq3XiS6uCzUpEDi0Im\nDfcqAWMH4xrHSotyMhER6b5qBIn5FqdgXgKLKcHJYGkVETWGJEO+2Bkgi6AUdERUXAh9PHMz3ZNa\n79ZLTdb
1XPdAJZHwKFC/J0tHnr/lvITcmjYpUNF2u11SFnkSr5a4Jzo/P49+v388WLIclmCkmQzi\nTBjZGC9Y8WASKnrsgqJyMK/KlYho/U4qSNX5Xs69surh1ReOhbtI9LuETtkoHjG2e5xjfjkcDo8H\nSzO3uYDsjEtgUPtyLlRapKkFdV4CcQtTc3fiFk2FcZpOoN3q+Jlblcczr/yr+rLZbJ6lEWJ6ug+n\ng6jYBCvnMt8M1mg0isVikf5mNcADNt0gLUMd8qArTdZPaqu7ITYnJXUA0h3ycy8FqW8CxK1aABIk\nn/jMkS7JSPf1tIaz5DkG/W6wxuNxRDxV0ckI6cIkCHWMf1MguWl8AuUTiryPnkuW5/kcBSOlcA12\nV8nv8jn6/n6/T4m8ZoDdFbP6IW9EBv0SWPq8joy8GazRaJRYnPyyW0lOoLKKnDuSJe12u+RKeH6F\nGu/ppRxPfvVculJajMpZBIsC9xyObkpVF85TuQuNiOTWWSojicoViangkvPRYF1cXFRKLZxyeM1s\n9XBZT44AKEGm6/MtNszR5DYjnuqT0mQKmnFM1pWzKDa6bBIRKZQE2Wq1KsDpu6L0PF2AwOReBFzK\nezRY4/E4aVaOUju1phnnqtEM4lIAlZ10f7k5DVrKInek++pzPdsTaSmS3nNi4f0XUOo7KxZ6bsSP\ndEb1TC5P6PV6yR3SYnMg0ZVrDCeDVZY/svZer/fMFbmp8zvO1PS+7sHAzwJso/G0fVTrNVTIzU3X\n655atURr1N+aneUqKSdEuqdXL0R4ZNHsB1denZ2dxXA4TCueSMI4ycg5M/bDXe9JYHW73TQQz6vI\n7hwwvkipcxUR3z3C4MwB6/7qA5d7ka1GPLlqJqmaU8rlWbQmPwOKCiWXp4S31WrFcDiMXq9XC5aX\nvKh0JBpHg+Uapa2VsiSyHIKkTrgWeVWCtTtOucg9iNiQ0pPd6X6k0O6WKRwJzU+tiXgiMdwQwZ2R\nXCfCTePqt4BitULPIljMwwiOPMTRYHFWNCJSAZZlJuYm3kkmmbnirhpZk+633W7T5CSDfK6+x9jl\nzIvvs6qg99UXxQxOz7AYQGEXRZFIkCoUXJbtTJNVDRaq5QKlmCcdB6RpDD1Y2kXLIo3VtTmwqPUk\nEy5UWdBqtYrZbBYPDw+V4+vkKtXI9mQ9BJKWplyIRWQpnaoQJEYqDtdRb8UuxVh3ccyp/Aggxlwp\ny0lgKUB7nuQ0mNSZHWVtTrGDgtOAmQLI7TFfa7Vaac0gK+nO5tgHCZVzb51OJ3q9XvT7/coSMMUr\nVSnKskxexet9tAzGGo7bc1HGLimSx3Ot6D0JrMPhkKgrY49TYRcgGSOptCyzLMtKjVADWK/XFQDp\n97XFhtm/KwrdpIPV7Xaj1+vFcDhM7+k+AkqphFIFr/mRVQoohgHvSw4sT8zlek+yrFzHBIwvpCFZ\nkPA1Q8pBsOotsDwBPhwOycVEPE2ryFV5NSJXziFQclUCq9frVarhqqqMRqOYz+eVXY1UQLdkNaYq\nHi/VJw8D7p1OBqvT6SRBK8hzWlvWpk5S4yOerEKarjimAM6YwZILUwQJlMCzquGpgIQityeAdBSs\nTpZh7NM9u91ujMfjStIrELgsgCus+H0ptOIoqyFc+ONLDCSTk8DSzKU6Jo3jmgJ1iKuDSN/1nr4j\nV8ZaI92qgyUG53kalyg7DXewBBStijPdunev14uLi4sE1HK5TFSefcuV2phg60UlFCh836s8J4H1\n7du3JBBND9DXUps5za7Ok7YqufWYRorMQbMM4+97dUTKJHDPzp7OZtJPrgdk/ueCVs2x3+8nK1Ny\n7PNVTtPZ6PYENKm6XK9XSI4G68uXL5XlYupArjrBQisTYY8xjHEqEzHBpQB9NbDeF8iey5BMyLII\nlMdgHwuT3l6vV5m38llnWhYrOQSLjesL1W8H66UNCm/KswQCgzYFR+EpA
6dPZ17G3Iq5Wk5LqQQE\nhZ/rPZIJj0k82pz9YHMGJ+vq9XoV66dg1byE5FUTPY9hQ8/0Qybdtb4LrM+fP6f9UdQoZzVyg65Z\nZHkSgq5XoBXA7ip88YuzStJzns2ulxdOWWtUsOezcsLS53KtquAwRmpcL5XaKCt+lwVjl9+7wfr0\n6VPc39/HdDpNuRYLudRyMjXGJOZL9PkCyoVIpSDAAoylKf2uHEqg+epcWrpiq0pmtHL1xctF7tYp\ndN1bMmC9L5ePMSdjHZKJ9VFgqfalsovWejPAe/mJ9Tun5T6VnaubEXgJSxYiv87vcBKS11FYuqeE\nFPG0zM6nLdQIGPNJegzGSgHn5IeumzGfSp6Lze8Ga7/fR7PZjMFgEBGRNEBawvjD97nsWFZEwdA1\nkNaTlpNpssTjizd5TyoCmSetVsL0iovHGDJSvUeg1DcnUU6AcpSe9+S4c6zyzWAtl8uKy1HBk9MF\nLEoyk1cHBJZ+8jt0eSIJnGzU97n5gPkcq9e5eqULRU0xNpdMu1B1DctYvA/dJJWc3/UKCxNozhOe\nBNb19XUSymq1ioeHh1gsFhVa7CDlAqkElksaSVI00elT9mR1dCUkHxSgYg+ndhi7uFmOrpkKyNiq\nwjJdMkkLXb9aroTmOSLjmGRzNFj/+c9/0oC0ZmKz2cTl5WW02+3K0TdOuQkO369rRfF0tIIGK4FJ\nObguQ4VXVRd4Hz0vBxar/dyPRbAIrNyyXDbJiwAh8WLB2pXKc1Ov2LzU3jyfFfG0iKQsy2i32/Hn\nn39Gv9+Pu7u7RDpoKbQMdt4BEjBeoXbBqjrvA2Mwp6Y6XWfMEUDuFUh6VObSlAnnmzjZyMbn8G/1\n08mHy+qkPMsn4hR7+v1+fPr0KXq9Xmw2m5hMJrVm7tWPOgA9dxJIZHg8X8LPmNDztAdMlkN3688k\n6C407f9SfOayNP1vsFzyW5dXsY85MnMyWHI3nM8qiiL6/X58/Pgxer1e3NzcVNiPC887WPfyKRZa\nF1kTgSJJkPZrh6LcqlcX6J5ycZZgHQ6HyiQlt9T6diJat4PDvwmqv39SzKL2iHlxIk/HHrAT/C47\n78Jyv++Jr77LE2nInvjTKwGNRiP6/X788ccfMRgMnpWEOJFKRZAXYMlpMBgkqj+bzZKyaKpI7tmV\nM0csXDaSWU5+7waLSWBEJD9ONsip8TpS4Vm95yqu3cxRCDiDNV8iG8rPiqKIwWAQHz9+jNFoVJmG\n4JoQpQ8M8uq/LL3f71dI1mw2Sxacsyq3dmfFHgpy1xwFlm5KV+VnDjEn8sF61cDdgw+Anc9RX2dW\nel8bB0gs2u12DAaDGI1GlekdLnumgH2FFvsh5qsCcaPRSIBprk33yoUBj9kcp4P7U8AiU6N2yBUw\n32Fx02tpL3VeYHjOwkF5Mns4HNIybDFVERR5AS8Yc+293nNLd+tRRX8wGKTvM37WxaDceKl4dL8v\ntXedketxRk0D4QJFEoXcxKK7An2WIyIEi3GKrkcbHJS06vm+GiniieHWpRcEVJYjj6JYHfG0plJg\n0bXXyZDX0OW/VheMeOch/HwgweMadA3SZ4TZSQ6I7jAHlp7JFILWJe1krJJFzOfzuLu7S/mhJ+85\njdZzdQ+RG067e190bykJX7l8UH3WPbgI9qSZYndfObCkdRQqr/VKdU4hcoyRjQNzjcyBtd1uYz6f\nx+3t7bMdLH5dLgdzYRZFkcAiEyVYh0P1ZFJPyDnWXAriHuvdYLlwnRiIyqueJ61xH51jQPrpbtGB\nkpB9ap0ukexOcWK5XMbd3V1lpyJzLC/zsB/OEPk81helmL5G3SskPma6T41BlnkyWLqJxwwtTBmN\nRjGdTpPpc+AaPO/D9ygod5HUxBxQ1EqveqzX65hMJom5aTUvNz84KfBxsmIS8XTOkgDR+75QKMd+\n+blKVzx/MOLECoaDRSalDnc6nRiNR
ml1q1uWxzzel7/X0XvPq0g0BBanTkQoHh8fYzabVfYraw2h\n/n1gbiOBwOKqLiXAo9EoBoNBSle4hTUX3308GpO+pyTev3MUWGqucUws2+12OoieluUv7/hLz+G1\nEU9uiZYly+beLpWXWG05Pz+PxWKRviuSwKo7gzvjk2IRLdcnW+X+9V3mf1JYMlIpgdYkigmy+HAU\nWOyEQPAqgMDSylknJbyXJ8I5JlYXJz2GsOCruS6yUCWvPF1NxdiIqORifg6V2KCWT3MJAZcwuEIR\nKBW9OXkrMASWlKGOWL0bLKfQ9OfSrMFgUCk9SbjOhF4z95xF+stZqATNzQxK1iVYrr7lCTJ0QVQY\nKiQtz2OyLI6ykfDZ1B/OchOc/xU3SJ+rWpymTFTYVelJhyiW5fPto7nm1QSvVFAwRfE0Ucn1gBQ4\ntVwukQoj5sg4zPxIbpYWoVjmyuPrPWhRGpPie0TEcDiM+XyeLJ0e52iwqEG6oTqs/VLc96QqvDrB\nTnuHnBkyAPtLwvWJSVpM7t6yHILlNTjWC13bmSfyWvZdnzP2cB8Wgde5gvrnnyJBr8XyN4HlnZfg\ntB5jNpulrTnD4TAuLy/j4uIiLVnTZzl/7LmV50LOphisuWDGY13OlfCezMUcOJ+x9sSeABNQjoVF\nAyqcrxLbbDaxWCwqqcVLZac3HxWupgGuVqu4v7+P+Xwe4/E4kYyrq6tUNZB5K9BTqBReLtdxV6Z+\nkOFRSK9ppQPmblKg+QIaeQVXNgItwPgM9k2xTGC1Wk//vuPu7i6B9VqN8N3/5Ydgff/+Pe7v7+Pj\nx4/RbDaj3+/H1dVVXF1dxXQ6TVt6nGjwfjkr4t++AIVu0EFW8wDvrM3dusc2uS+mAH4/Aqx75EDl\nGLUzpd1up9xNp4K+paD7ptqgm3lZlrFYLGIymcRkMom///3vEfFj493V1VV8+PAh/vvf/ybmpa2e\nbBKQC4K5BmMGP6c79t/rxsDCL0FnfyKq51awWqFruHTBXaG+7zFPcZU7WvQ7yZEYdF17879k8rZc\nLmMymcTt7W064q7b7Sawut1uEpJYIYucvLe7PoFYB5a+m4s5EpJbmq/N4z3JVpkS8CTOHEOtS0nc\nnQuQHFjMDU8GS51xN7Zer+Ph4SHFreVyGRGRdg6Ox+MYjUZpy9ByuUzHGWhSsi6ncBeoaoH64tqd\nS7wdFP6tn6x0yxVxcYzyMQZ/xigm5VyBRcviUQ4qdmtejIecRPwIF7kTud8MFuk0B6y1CPf392mV\nbkSkPbnj8TidqLZcLtM/Z9a215eSYoLmk3X6KcFJ09VocRImXThdnwDiYk4RjLIs00omP7Byv99X\nNuvV5ZF6npiywBKAzEuLoqgUjI8Ci4A5WPP5PB4eHtJLmjIYDOLi4iI+fPhQ2ZfLTd+5QOzVEgmU\nboYWxYoBQdJ71Hin1hyXK4bcEU/t5BpFuW8tkcu99Dzt5xoMBpVzclutVmXTn04i4HmLR4OlRia1\nXq9jPp/HdDqN29vbylTFeDyOz58/pxqY/hsAJ+m4qUD3zlF3AkswfLVSrshMt0diQVel+3Kcsihu\nyNYy7mazmTaSqwjAaghnfTXXp3/gSdfLzem9Xi9Z808Di2UXTZ3f39/HZDJJg2g2m3FxcZF2TU4m\nk/j+/XsCi8wrlx8RHMYXtyppPI+Z49/qOxNbLh6ty504+6xtStwCK6A0zUK3LJcsUqEzOCQXKZHc\no4580MrmkyoYHAg1Xhq8Wq1iOp3Gzc1NdDqdVCNsNpspSf748eOzo+EEUp3wckyLbkZCYRwiGM7i\n1JjL5OIm760EXPFJh54IJC7uZAVF4+KRfNr9IrCkALqviuB1sfxdYKmRXsuvT6fTmEwmMRgMYjwe\nJ7fQ7Xbj8vIy/vrrryjLMm5ubtKBWdJG0mN3i2y52qEGLYG5ZeRejD8SbG58Lkwez6DY432l25bL\n9
GP5VH8kK2TO9dPA8mqD3NrDw0N8/fo1xuNx/PHHH2mplg6V//jxYxwOPxaW3N/fV2LNayUiCsEp\nekT1vEKWdwgaNzJERHouFcDLSyIGPOhEu0b8oBMHSmAQBM7zqa+aS/PrTgLL/bqoqqbxZ7NZXF9f\nx4cPH+Lz58+VVUEKpOPxOK6urlJNUf8Zlat5SSoICCk6yQjrcD7TK0v1OKegrtlin6WVpdI98ViG\nXILOvjIejUajdPoarV4KxkWoPFDlJLDYMcYaCWs2m8V6vY7Pnz+nJFjaXBRFqmbolOmyLFPsIv31\nslFd4utgSUC0MuZeFCrXPoiMkCVyOyz/zS2tlwrlpbKISIuIhsNhWq/h9UR3tWKZP2Va339nnqF/\nXjaZTOLbt2+V4+E4/9Pr9eLDhw9pZY8sSxN/yosoGK8IuGVx+iKXS/Fa3kM1OwV+rwlyJa8n5e6W\nPekWoZBrc+smaLJintI2n8+PA4saSrDkEvb7p/8mcHNzE9fX19Hr9eJvf/tbjMfjOBwOMZ/PY7vd\nRrvdjqurqxTrtI9Ky56lxX7UwUsVaQfCczQJitfJ3SkuEWABxmfT6sn+3Kq4xJpuzXeoEDiFCaUC\nJ/1LJoJFwAjWdvvjLNu7u7u4vr5Os6DSTDFH7XWSS1yv12kRppYHcD6JBU49O1fj80q3g+UxUPf2\nuSuOS2NnrOF1Pn9Fq/Ljh1j58Hsq9v8UsOrcIE1a7EeLKkejUXz48CH++uuvyllOAinix+H+RVGk\nAL5YLCp7dun/lc+oAuCg6G8HNRdX+DetTu+z1kiQc66WQBdFUTmEknvW+NzcugyC1ev1arF4VwWD\ng8+B9fj4GLe3t9HtduPTp08xn8+Thms2VOCNRqO0fK3VasXd3V18//49FYTJzlQlUAXA+ya2SMBc\nUBRY7vs5FuokJhcX6S5ZrcgtHuWcV8TTXCGPg1Xak2tvZoMMrhqQTFpC3e12MZvN4u7uLm5ubuLr\n16/pXz5oXbiSQrkgTamwhKRByjXqOxQOi66sxlPgdcA4eAQgB3YdaCQyzK1UXec91Oiyqeg8IOwk\nsHIuhdpMZrher2M6nca3b9/i3//+d/z5559xcXERnU6ncoRpxFN80DFxuo8Kvzzp05dG+6Zw9icn\nJAchxyY1vjqXmqPwAiAiKokw10964s37MtbxDJCTwfIBkQY3Go2Uu8xms7i5uUnHD/T7/RgOh8ka\nFHDl83Umujp9e3ub6ogaMP/vh4BjLFGy6cpFcHJuLkf9pTi8PvcdNX2fG+N9n3VOplRQpRFcRnAU\nWO6vX7pqCCkOAAAGYUlEQVROCel0Oo0vX75UllarFhgRifLrvhK2JuoionJkt9witdktS8/OVUA8\nz5HSeMWABINxM5fD0UPQlSl3c6vyXI8F49yaj3eDlUsw6xqnxx8eHmKz2aREeDQaRcTTP/zkQhrd\nnytWdYrmfD6P+XyehEvXUSdcd2M+WamXC0/gK/BzzQhLWT556dV5VvxZsWDjZz8NrFzL5S/OeOTG\nJpNJfP36NZVfhsNhlOXTJBsJgoDjBgF9zh2Lqorkvl/Xx9z7nqTyd1LsOkaozxSreA4vl1jn5Cfl\nUb+5SaKuvXtPsTron0sbpYGqqt/f38f19XU0Go34xz/+ERcXF4nhcaMaXYWE32w2Uzzj1IbAZp9y\nRCGXk/FVxw65PJsz2iyB8VpNSgos9oNyciZNgiZXerJl1SXGaszwWZjVlIgAGI/HyWWoiMrpDNda\nCUpVcv67Wc04S0GcKLgb9FjnnkGNLjkHGAXvYJEEuUL7T89TmVOeBFYOsJy/l7tSU8VhsVjE7e1t\nfP36Na6vr2M0GkVRFOlgYLk1nwLRT7ka3VMUl2svmCx7PY/30k8JlBOXBIH7trjGgmsrVHnQbkjf\n+cn+83c+XwrAua2TwKrz/bmgyetUcZjP53E4HNIE5W63i/F4nOq
EGhyTZmdQ0m5Ok/NUGcYu9jnn\n5iis3JhIp+XWvBYp1qqXpkIiouLe/LkRUfEAPxWsHEh6eE4o7s8Vk2RdX758ScLgP7TMTS6y0T1J\nu0lAuCztLSAxP3TNZ/Wdc2RUluFwmBROazI8lagjJHXv6f5Hg6Wbcb2Bk43cw91tRETMZrMEFrWI\n5Sq/V90zuSer0WhU9v9qKt8ZHccQUQWes8363a1Jswb9fj8Gg0ECynM+J1u5/vN9uuCfMvlIS8q1\nnKaow9L+2WyW1r3rACxRXp8ldcvl+7QsCkVWzN2JTAP8/rQeLoNWEkxlENG5uLiIi4uLtC5D23e4\nCEjUn1WM3My1pxDMH48CK+eOfFrB8xsHli5Law1vb29TEffy8rKy9sFdGQWn+7JqoTwn4sn36/9s\n0e058F5j5OFbJC7KnXq9XiyXy1gsFsnCVFri5j7dW7+zfOXjyL1+GlgCzP/OCYSmLZak+DWZTNJg\ntFpVYHFmtY518tmMZ0oLlI+9JBA9Uxv/9NLEqGqdqkz0er14eHiI6XSaJliHw2GyNh50LNlRBky+\nc2NykvRusOoAe0tzRqdOaYVTURTpdBpZVm5qIZenyKpo2fq+YhZjCJ8vgREsWfxsNkvuWqBr2kM5\n3mKxSEvUxuNxsr6yLJ/9f2XJz5Nxjo2A/dRtqq9dVxdI+d5ut0t0XhraaDRiMBhU/qmZ7ucpwUvW\nzljkYEVExd0JrLIs02JVbbLgHJqaFE2bMs7OfpxSoIWrFxcXiXSoj5z+4P04RqYuJ691f60RqJwg\nnaSodLRarRJYosSj0aji6ljZoFbmGCiLrVIKutOiKFJNUVV+gaUN7dPpNKbTaZqW0ZYefUdAi8y0\n2+1UiVmtVnF1dZWSfs0Ci0kq9ikkqG9kkyf9ZzoK232+W1MuSLoLY8cOh0NaaKNlxJoe4bpELnV2\n0OqYFgFkPyVALqderVbpn1frMC6lFxQwabyeSxIjAsVT2JrNZrq/qh5cMMqczl3ku8FyIHIlf/e7\nuQeSmjJ2LJfLuL29TTnM5eXls/9kkItZdcrhVigB+9RHWZZp39hyuUxTMVqeRhpPxXGqLyWTBQl8\nnu7pe4pF+1kUeG2B55vAqhNK7vO66gGbEw7Fina7nXZLDgaDNABaju6bWyfhfckl6yIVmm/a7/eJ\nimt3Zr/fr2zMziXNXIjKpWdSCsUwziaoRKX18/qMlkmClWvv3pjg7SVg/BpeS3eobUPfvn2LZrOZ\njmqQUASa53gv9Snnitnkgm9vb9NEKd2c4hmrGfoeN1ToM+4pJgsky9vtdrFYLFJs5BYigXV5eVkr\nx6O3/Hg+9ZLV+U/6fzEglZAOh0Pa20U34bTWreal5+X6Jqu6u7t7BlYOKMYlEhZam8bDhF0KqXGK\n7SmRVlFAm+1+Klh1wnlvo7AFBHctOmlwa8o9N0d46lIPCjE3peGv3JjrPs89i9bFColO88zVE5+N\n75X4chwSv9vJrSzLZxrwIli/2/+v9jJX/N3+X7XfYP1C7TdYv1D7DdYv1H6D9Qu1/wGM5bVPIm1v\n5wAAAABJRU5ErkJggg==\n",
       "text": [
        "<matplotlib.figure.Figure at 0xa616208>"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "Target names: \n",
        "-------------\n",
        "['Ariel Sharon' 'Colin Powell' 'Donald Rumsfeld' 'George W Bush'\n",
        " 'Gerhard Schroeder' 'Hugo Chavez' 'Jacques Chirac' 'Jean Chretien'\n",
        " 'John Ashcroft' 'Junichiro Koizumi' 'Serena Williams' 'Tony Blair']\n",
        "\n",
        "Target: \n",
        "-------\n",
        "[11  4  2 ...,  3 11  5]\n",
        "\n",
        "Data: \n",
        "-----\n",
        "[[  75.66666412   85.33333588   99.66666412 ...,   95.66666412\n",
        "   137.66667175  142.33332825]\n",
        " [  23.33333397   46.66666794   79.33333588 ...,   50.66666794   51.           65.        ]\n",
        " [ 100.33333588   80.33333588   56.         ...,  179.66667175\n",
        "   159.66667175   98.        ]\n",
        " ..., \n",
        " [  49.66666794   47.66666794   72.         ...,  206.          191.\n",
        "   168.33332825]\n",
        " [ 118.33333588  150.          163.         ...,  228.66667175\n",
        "   231.66667175  228.        ]\n",
        " [  31.33333397   25.66666603   25.66666603 ...,   23.66666603   25.\n",
        "    36.66666794]]\n"
       ]
      }
     ],
     "prompt_number": 1
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
       "Particionar los datos en dos conjuntos disjuntos de entrenamiento y testeo:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from sklearn.cross_validation import train_test_split\n",
      "\n",
      "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 2
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Extraer atributos de las im\u00e1genes para ser utilizados en el modelo de clasificaci\u00f3n. Para esto, investigar las clases de\n",
      "*Principal Component Analysis (PCA)* del paquete *sklearn.decomposition*:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from sklearn.decomposition import RandomizedPCA, PCA, KernelPCA\n",
      "\n",
      "pca = PCA(n_components=0.9, whiten=True).fit(X_train)\n",
      "#pca = KernelPCA().fit(X_train)\n",
      "#pca = RandomizedPCA(iterated_power=5, whiten=True).fit(X_train)\n",
      "X_train_pca = pca.transform(X_train)\n",
      "X_test_pca = pca.transform(X_test)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 3
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**PREGUNTA: Explique el m\u00e9todo de extracci\u00f3n de atributos y justifique su elecci\u00f3n.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**RESPUESTA:**\n",
      "\n",
      "PCA se utiliza para descomponer un conjunto de datos multivariado en un conjunto de componentes ortogonales sucesivas. Su objetivo es reducir la dimensionalidad de un conjunto de datos, teniendo como resultado la proyecci\u00f3n seg\u00fan la cual los datos queden mejor representados en t\u00e9rminos de m\u00ednimos cuadrados. \n",
      "\n",
       "De entre las principales clases para extraer atributos analizamos las siguientes:\n",
      "* PCA                \n",
      "* RandomizedPCA      \n",
      "* KernelPCA\t\t     \n",
      "* SparsePCA\n",
      "\n",
      "**PCA**\n",
      "\n",
      "Reducci\u00f3n de dimensionalidad lineal utilizando descomposici\u00f3n en valores singulares (SVD) de los datos, y manteniendo solo los vectores singulares m\u00e1s significativos para proyectar los datos a un espacio de dimensi\u00f3n inferior. Solo funciona para matrices densas y no es escalable para datos de grandes dimensiones.\n",
      "\n",
      "**RandomizedPCA**\n",
      "\n",
      "Reducci\u00f3n de dimensionalidad lineal utilizando descomposici\u00f3n en valores singulares (SVD) **aproximados** de los datos y manteniendo solo los vectores singulares m\u00e1s significativos para proyectar los datos a un espacio de dimensi\u00f3n inferior. \n",
      "\n",
      "**KernelPCA**\n",
      "\n",
      "En lugar de realizar la reducci\u00f3n de dimensionalidad linealmente, se hace mediante el uso de funciones kernel no lineales.\n",
      "\n",
      "**SparsePCA**\n",
      "\n",
      "Busca el conjunto de componentes dispersos que pueden reconstruir de manera \u00f3ptima los datos. El nivel de dispersi\u00f3n es controlable por el coeficiente de penalizaci\u00f3n L1, dado por el par\u00e1metro alfa.\n",
      "\n",
       "Hay una obvia conveniencia de utilizar RandomizedPCA frente a PCA para arreglos densos o grandes conjuntos de datos.\n",
       "SparsePCA demora mucho y es rotundamente descartado por eso.\n",
       "\n",
       "Se realizaron pruebas para determinar el mejor algoritmo, siendo el ganador el algoritmo PCA con par\u00e1metros n_components=0.9 y whiten=True (el utilizado en la celda de c\u00f3digo anterior)."
     ]
    },
    {
     "cell_type": "heading",
     "level": 5,
     "metadata": {},
     "source": [
      "Elija dos algoritmos de aprendizaje y entrene e intente obtener los mejores modelos de clasificaci\u00f3n posibles:"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
       "Como algoritmos de aprendizaje elegimos \u00c1rboles de decisi\u00f3n y el algoritmo SVC; a continuaci\u00f3n, los par\u00e1metros de cada uno:\n",
      "\n",
      "**\u00c1rbol de decisi\u00f3n:** \n",
      "\n",
      "+ **criterion: ** String, opcional (por defecto=\u201dgini\u201d). Funci\u00f3n para medir la calidad de una partici\u00f3n de ejemplos. Opciones: \"gini\" o \"entropy\".\n",
      "+ **splitter: ** String, opcional (por defecto=\u201dbest\u201d). Estrategia utilizada para separar ejemplos en cada nodo. Opciones: \"best\" para elegir la mejor partici\u00f3n, o \"random\" para elegir la mejor partici\u00f3n aleatoria. \n",
      "+ **max_features** (int, float, string o None, opcional (por defecto=None))\n",
      "Cantidad de atributos a considerar cuando se est\u00e1 buscando la mejor partici\u00f3n.\n",
      "    + Si es int: M\u00e1xima cantidad que se considera en cada partici\u00f3n.\n",
      "    + Si es float: Porcentaje de atributos que se consideran en cada partici\u00f3n.\n",
      "    + Si \"auto\" o \"sqrt\": Se considera la ra\u00edz cuadrada del total de atributos posibles.\n",
      "    + Si \"log2\": Se consideran log2(total de atributos posibles) atributos.\n",
      "    + Si None: Se considera el total de atributos disponibles.\n",
      "\n",
       "+ **max_depth: ** Int o None, opcional (por defecto=None). Profundidad m\u00e1xima del \u00e1rbol. Si no se pone nada se contin\u00faa el algoritmo hasta que los conjuntos de ejemplos no se puedan subdividir (sean puros), o que todos los conjuntos finales tengan menos elementos que el valor del par\u00e1metro min_samples_split. Se ignora si el par\u00e1metro max_leaf_nodes no vale None.\n",
      "\n",
       "+ **min_samples_split: ** Int, opcional (por defecto=2). M\u00ednima cantidad de ejemplos requeridos para dividir un nodo (vale 2 por defecto).\n",
      "+ **min_samples_leaf** : Int, opcional (por defecto=1). M\u00ednima cantidad de ejemplos requeridos para estar en un nodo hoja (vale 1 por defecto).\n",
      "\n",
      "+ **max_leaf_nodes** : int o none, opcional (por defecto=none). Hace crecer un arbol con max_leaf_nodes nodos en la forma de primero el mejor (los mejores nodos se definen como los que logran una reducci\u00f3n relativa de la impureza). Si none es especificado entonces se puede obtener un numero ilimitados de nodos hoja. Si no se especifica 'none' entonces el parametro max_depth ser\u00e1 ignorado.\n",
      "\n",
      "+ **random_state: ** int, instancia de RandomState o None, opcional (por defecto=None). Especifica la forma en que se generan los n\u00fameros aleatorios. Si se le pasa un valor entero \u00e9ste ser\u00e1 la semilla para la generaci\u00f3n de los n\u00fameros, si se le pasa una instancia de RandomState \u00e9sta ser\u00e1 la generadora de los n\u00fameros, y si es \"None\" el generador es la instancia de RandomState usada por np.random.\n",
      "\n",
      "\n",
      "** SVC: **\n",
      "\n",
      "+ **C: ** Float, opcional (por defecto=1.0): Par\u00e1metro de penalizaci\u00f3n del t\u00e9rmino de error.\n",
      "+ **kernel: ** String, opcional (por defecto=\u2019rbf\u2019): Especifica el tipo de n\u00facleo que ser\u00e1 usado por el algoritmo. Puede ser \u2018linear\u2019, \u2018poly\u2019, \u2018rbf\u2019, \u2018sigmoid\u2019, \u2018precomputed\u2019 o el llamado a una funcion que se utiliza para precomputar la matriz del kernel.\n",
      "+ **degree: ** Int, optional (por defecto =3). Grado de la funci\u00f3n polinomial n\u00facleo, solo se toma en cuenta si el n\u00facleo es \"poly\", en otro caso se ignora.\n",
      "+ **gamma: ** Float, opcional (por defecto =0.0). Coeficiente del n\u00facleo para \u2018rbf\u2019, \u2018poly\u2019 y \u2018sigmoid\u2019. Si  gamma vale 0.0 entonces se toma 1/(cantidad de atributos).\n",
      "+ **coef0 : ** Float, opcional (por defecto=0.0). T\u00e9rmino independiente de la funci\u00f3n n\u00facleo. Solo se toma en cuenta en \u2018poly\u2019 y \u2018sigmoid\u2019.\n",
      "+ **probability: ** Boolean, opcional (por defecto=False):\n",
       "Indica si se deben permitir las estimaciones de probabilidad. Si se desea utilizar, debe activarse antes del entrenamiento, y provocar\u00e1 su ralentizaci\u00f3n.\n",
      "+ **shrinking: ** Boolean, optional (por defecto=True). Indica si se debe habilitar el \"shrinking\" (contracci\u00f3n), una t\u00e9cnica de optimizaci\u00f3n que reduce el conjunto de entrenamiento. \n",
      "+ **tol: ** Float, opcional (por defecto=1e-3)\n",
      "Tolerancia para el criterio de detenci\u00f3n del algoritmo.\n",
      "+ **cache_size: ** Float, optional. Tama\u00f1o del cach\u00e9 del n\u00facleo (en MB).\n",
      "+ **class_weight: ** {dict, \u2018auto\u2019}, opcional. Ajusta el par\u00e1metro C de la clase i como class_weight[i]*C para SVC. Si no se da, se supone que todas las clases tienen peso uno. El modo 'auto' utiliza los valores de 'y' para ajustar autom\u00e1ticamente los pesos inversamente proporcionales a las frecuencias de clase.\n",
      "\n",
      "+ **verbose: ** Bool, por defecto: False. Activa o desactiva la salida detallada.\n",
      "+ **max_iter: ** int, opcional (por defecto =-1)\n",
      "L\u00edmite m\u00e1ximo en iteraciones para la salida. Si vale -1 no tendr\u00e1 l\u00edmite.\n",
      "+ **random_state: ** Int, RandomState, o None (por defecto)\n",
      "Igual al par\u00e1metro random_state del algoritmo de \u00e1rbol de decisi\u00f3n."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Imprima los mejores resultados de precision, recall y accuracy para los algoritmos seleccionados:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from sklearn import grid_search\n",
      "from sklearn.pipeline import Pipeline\n",
      "from operator import itemgetter\n",
      "from scipy.stats import randint as sp_randint\n",
      "\n",
      "# imprimir_performance\n",
      "def imprimir_performance( X, y, clf ):\n",
      "    from sklearn import metrics\n",
      "\n",
      "    y_predicted = clf.predict(X)\n",
      "\n",
      "    print \"Performance para: %s\" % clf\n",
      "    print \n",
      "    print metrics.classification_report(y, y_predicted)\n",
      "    print\n",
      "    print \"Accuracy\"\n",
      "    print metrics.accuracy_score(y, y_predicted)\n",
      "    print\n",
      "    \n",
      "# report\n",
      "def report(grid_scores, n_top=3):\n",
      "    top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]\n",
      "    for i, score in enumerate(top_scores):\n",
      "        print(\"Modelo rankeado en la posici\u00f3n: {0}\".format(i + 1))\n",
      "        print(\"Puntuaci\u00f3n: {0:.3f} (std: {1:.3f})\".format(\n",
      "              score.mean_validation_score,\n",
      "              np.std(score.cv_validation_scores)))\n",
      "        print(\"Par\u00e1metros: {0}\".format(score.parameters))\n",
      "        print(\"\")\n",
      "\n",
      "# imprimir_performance detallada\n",
      "def imprimir_performance_detallada(clf, n_top=3):\n",
      "    print \"Ranking mejores puntuaciones\"\n",
      "    print \"---------------------------- \\n\"\n",
      "    \n",
      "    report(clf.grid_scores_)\n",
      "\n",
      "    print \"Impresion de performance para el modelo mejor evaluado\"\n",
      "    print \"------------------------------------------------------ \\n\"\n",
      "    imprimir_performance(X_test_pca, y_test, clf)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 4
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from sklearn import svm, tree\n",
      "from sklearn.naive_bayes import GaussianNB\n",
      "from sklearn import grid_search\n",
      "import numpy as np\n",
      "\n",
      "print \"***************\"\n",
      "print \"* Pruebas DT  *\"\n",
      "print \"***************\\n\"\n",
      "\n",
      "parameters_dt = {'criterion':['gini','entropy'], \n",
      "              'max_depth':[4,5,None],\n",
      "              'min_samples_split':[2,3], \n",
      "              'min_samples_leaf':[2,3],\n",
      "              'splitter':['best','random'],\n",
      "              'max_features':[0.6, 0.9, None]}\n",
      "\n",
      "dt = tree.DecisionTreeClassifier()\n",
      "clf_dt = grid_search.GridSearchCV(dt, parameters_dt)\n",
      "clf_dt.fit(X_train_pca, y_train)\n",
      "imprimir_performance_detallada(clf_dt)\n",
      "print \"\\n\\n\"\n",
      "\n",
      "print \"***************\"\n",
      "print \"* Pruebas SVC *\"\n",
      "print \"***************\\n\"\n",
      "\n",
      "parameters_svc = {'kernel':['rbf','sigmoid','linear','poly'],\n",
      "              'gamma':[0,0.1, 0.001],\n",
      "              'tol':[0.01,0.1],\n",
      "              'C':[4],\n",
      "              'degree': [2,3]}\n",
      "svc = svm.SVC()\n",
      "clf_svc = grid_search.GridSearchCV(svc, parameters_svc)\n",
      "clf_svc = clf_svc.fit(X_train_pca, y_train)\n",
      "imprimir_performance_detallada(clf_svc)\n",
      "print \"\\n\\n\"\n",
      "\n",
      "nb = GaussianNB()\n",
      "nb = nb.fit(X_train_pca, y_train)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "***************\n",
        "* Pruebas DT  *\n",
        "***************\n",
        "\n",
        "Ranking mejores puntuaciones"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "---------------------------- \n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 1\n",
        "Puntuaci\u00f3n: 0.395 (std: 0.010)\n",
        "Par\u00e1metros: {'splitter': 'best', 'min_samples_leaf': 2, 'min_samples_split': 2, 'criterion': 'entropy', 'max_features': None, 'max_depth': 5}\n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 2\n",
        "Puntuaci\u00f3n: 0.395 (std: 0.008)\n",
        "Par\u00e1metros: {'splitter': 'best', 'min_samples_leaf': 2, 'min_samples_split': 3, 'criterion': 'entropy', 'max_features': None, 'max_depth': 5}\n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 3\n",
        "Puntuaci\u00f3n: 0.395 (std: 0.008)\n",
        "Par\u00e1metros: {'splitter': 'best', 'min_samples_leaf': 3, 'min_samples_split': 2, 'criterion': 'entropy', 'max_features': None, 'max_depth': 5}\n",
        "\n",
        "Impresion de performance para el modelo mejor evaluado\n",
        "------------------------------------------------------ \n",
        "\n",
        "Performance para: GridSearchCV(cv=None,\n",
        "       estimator=DecisionTreeClassifier(compute_importances=None, criterion='gini',\n",
        "            max_depth=None, max_features=None, max_leaf_nodes=None,\n",
        "            min_density=None, min_samples_leaf=1, min_samples_split=2,\n",
        "            random_state=None, splitter='best'),\n",
        "       fit_params={}, iid=True, loss_func=None, n_jobs=1,\n",
        "       param_grid={'splitter': ['best', 'random'], 'min_samples_leaf': [2, 3], 'max_features': [0.6, 0.9, None], 'criterion': ['gini', 'entropy'], 'min_samples_split': [2, 3], 'max_depth': [4, 5, None]},\n",
        "       pre_dispatch='2*n_jobs', refit=True, score_func=None, scoring=None,\n",
        "       verbose=0)\n",
        "\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "          0       0.00      0.00      0.00        16\n",
        "          1       0.27      0.33      0.30        60\n",
        "          2       0.55      0.18      0.27        33\n",
        "          3       0.49      0.73      0.58       117\n",
        "          4       0.29      0.07      0.11        29\n",
        "          5       0.27      0.17      0.21        23\n",
        "          6       0.00      0.00      0.00        11\n",
        "          7       0.18      0.11      0.13        19\n",
        "          8       0.00      0.00      0.00        14\n",
        "          9       0.13      0.14      0.14        14\n",
        "         10       0.71      0.36      0.48        14\n",
        "         11       0.29      0.45      0.35        40\n",
        "\n",
        "avg / total       0.34      0.37      0.33       390\n",
        "\n",
        "\n",
        "Accuracy\n",
        "0.369230769231\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "***************\n",
        "* Pruebas SVC *\n",
        "***************\n",
        "\n",
        "Ranking mejores puntuaciones"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "---------------------------- \n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 1\n",
        "Puntuaci\u00f3n: 0.745 (std: 0.007)\n",
        "Par\u00e1metros: {'kernel': 'rbf', 'C': 4, 'tol': 0.01, 'gamma': 0, 'degree': 2}\n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 2\n",
        "Puntuaci\u00f3n: 0.745 (std: 0.008)\n",
        "Par\u00e1metros: {'kernel': 'rbf', 'C': 4, 'tol': 0.1, 'gamma': 0, 'degree': 2}\n",
        "\n",
        "Modelo rankeado en la posici\u00f3n: 3\n",
        "Puntuaci\u00f3n: 0.745 (std: 0.007)\n",
        "Par\u00e1metros: {'kernel': 'rbf', 'C': 4, 'tol': 0.01, 'gamma': 0, 'degree': 3}\n",
        "\n",
        "Impresion de performance para el modelo mejor evaluado\n",
        "------------------------------------------------------ \n",
        "\n",
        "Performance para: GridSearchCV(cv=None,\n",
        "       estimator=SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0,\n",
        "  kernel='rbf', max_iter=-1, probability=False, random_state=None,\n",
        "  shrinking=True, tol=0.001, verbose=False),\n",
        "       fit_params={}, iid=True, loss_func=None, n_jobs=1,\n",
        "       param_grid={'degree': [2, 3], 'kernel': ['rbf', 'sigmoid', 'linear', 'poly'], 'C': [4], 'tol': [0.01, 0.1], 'gamma': [0, 0.1, 0.001]},\n",
        "       pre_dispatch='2*n_jobs', refit=True, score_func=None, scoring=None,\n",
        "       verbose=0)"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "          0       0.87      0.81      0.84        16\n",
        "          1       0.76      0.80      0.78        60\n",
        "          2       0.86      0.58      0.69        33\n",
        "          3       0.65      0.98      0.78       117\n",
        "          4       0.86      0.62      0.72        29\n",
        "          5       1.00      0.57      0.72        23\n",
        "          6       1.00      0.18      0.31        11\n",
        "          7       1.00      0.74      0.85        19\n",
        "          8       1.00      0.43      0.60        14\n",
        "          9       1.00      0.79      0.88        14\n",
        "         10       0.90      0.64      0.75        14\n",
        "         11       0.84      0.78      0.81        40\n",
        "\n",
        "avg / total       0.81      0.77      0.76       390\n",
        "\n",
        "\n",
        "Accuracy\n",
        "0.766666666667\n",
        "\n",
        "\n",
        "\n",
        "\n"
       ]
      }
     ],
     "prompt_number": 5
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Evaluate the Gaussian NB model (fit in the previous cell) on the test set\n",
      "imprimir_performance(X_test_pca, y_test, nb)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "Performance para: GaussianNB()\n",
        "\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "          0       0.65      0.81      0.72        16\n",
        "          1       0.85      0.68      0.76        60\n",
        "          2       0.76      0.39      0.52        33\n",
        "          3       0.56      0.90      0.69       117\n",
        "          4       0.65      0.45      0.53        29\n",
        "          5       0.93      0.61      0.74        23\n",
        "          6       0.60      0.27      0.37        11\n",
        "          7       0.88      0.37      0.52        19\n",
        "          8       0.78      0.50      0.61        14\n",
        "          9       1.00      0.79      0.88        14\n",
        "         10       0.72      0.93      0.81        14\n",
        "         11       0.81      0.62      0.70        40\n",
        "\n",
        "avg / total       0.73      0.68      0.67       390\n",
        "\n",
        "\n",
        "Accuracy\n",
        "0.679487179487\n",
        "\n"
       ]
      }
     ],
     "prompt_number": 6
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**PREGUNTA: Analice los resultados obtenidos.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**RESPUESTA:**\n",
      "\n",
      "Con PCA se logran los siguientes mejores resultados:\n",
      "\n",
      "<table>\n",
      "<tr><td></td><td>precision</td><td>recall</td><td>f1-score</td><td>accuracy</td></tr>\n",
      "<tr><td>DT</td><td>0.41</td><td> 0.46</td><td>0.40</td><td>0.45652173913</td></tr>\n",
      "<tr><td>SVC</td><td>0.85</td><td>0.84</td><td>0.83</td><td>0.83850931677</td></tr>\n",
      "<tr><td>Gaussian</td><td>0.78</td><td>0.78</td><td>0.76</td><td>0.776397515528</td></tr>\n",
      "</table>\n",
      "\n",
      "Con RandomizedPCA se logran los siguientes mejores resultados:\n",
      "\n",
      "<table>\n",
      "<tr><td></td><td>precision</td><td> recall</td><td>f1-score</td><td>accuracy</td></tr>\n",
      "<tr><td>DT</td><td>0.26</td><td>0.40</td><td>0.30 </td><td>0.40372670807</td></tr>\n",
      "<tr><td>SVC</td><td>0.32</td><td>0.43</td><td>0.34</td><td>0.434782608696</td></tr>\n",
      "<tr><td>Gaussian</td><td>0.38</td><td>0.45</td><td>0.38</td><td>0.450310559006</td></tr>\n",
      "</table>\n",
      "\n",
      "Con KernelPCA se logran los siguientes mejores resultados:\n",
      "\n",
      "<table>\n",
      "<tr><td></td><td>precision</td><td> recall</td><td>f1-score</td><td>accuracy</td></tr>\n",
      "<tr><td>DT</td><td>0.45</td><td>0.45</td><td>0.44</td><td>0.453416149068</td></tr>\n",
      "<tr><td>SVC</td><td>0.85</td><td>0.85</td><td>0.85</td><td>0.847826086957  </td></tr>\n",
      "<tr><td>Gaussian</td><td>0.18</td><td>0.43</td><td>0.26</td><td>0.428571428571</td></tr>\n",
      "</table>\n",
      "\n",
      "Como se puede ver en los resultados obtenidos, el algoritmo que mejor se comport\u00f3 result\u00f3 ser el KernelPCA combinado con el clasificador SVC logrando un parejo 85% de precision, recall, f1-score y exactitud. Tambi\u00e9n existen otras combinaciones que dan buenos resultados como PCA con SVC o Gaussian. \n",
      "\n",
      "Para lograr obtener los mejores resultados se utiliz\u00f3, al igual que en el Laboratorio 1, el m\u00e9todo GridSearchCV. Dicho m\u00e9todo si bien es muy \u00fatil para obtener la configuraci\u00f3n de par\u00e1metros que mejores resultados logran, presenta la desventaja que exige mucho poder de c\u00f3mputo, dado que tiene un orden algor\u00edtmico de O(prod_n(k_i)) (donde n es la cantidad de par\u00e1metros y k_i la cantidad de posibles valores que puede asumir cada par\u00e1metro). Es decir, GridSearchCV hace una b\u00fasqueda exhaustiva en el espacio de par\u00e1metros. \n",
      "\n",
      "Otra opci\u00f3n \u00fatil es usar RandomizedSearchCV[6], que en vez de utilizar una b\u00fasqueda exhaustiva, realiza una b\u00fasqueda aleatoria, para la cual se puede asignar el m\u00e1ximo conjunto de valores entre los cuales probar. Sin embargo en nuestro caso, encontramos m\u00e1s \u00fatil por razones pragm\u00e1ticas, hacer una b\u00fasqueda exhaustiva de un conjunto relativamente grande de par\u00e1metros, identificar los valores \u00f3ptimos, y luego, a modo de ejemplo, reducir dicho conjunto presentando solo un conjunto ejemplar.\n",
      "\n",
      "La ejecuci\u00f3n del algoritmo SparsePCA conlleva una demora que excede la practicidad, por lo que se dej\u00f3 fuera del conjunto usado."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Total wall-clock time for this section, measured from start_time above.\n",
      "# NOTE(review): time.clock() is deprecated in Python 3; time.perf_counter()\n",
      "# is the modern equivalent.\n",
      "print(\"--- %s Tiempo ejecucion, segundos ---\" % ((time.clock() - start_time)))"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "--- 244.072825887 Tiempo ejecucion, segundos ---\n"
       ]
      }
     ],
     "prompt_number": 7
    },
    {
     "cell_type": "heading",
     "level": 1,
     "metadata": {},
     "source": [
      "Text Processing"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "En esta secci\u00f3n trabajaremos en un problema que involucra el uso de textos como atributos en un clasificador.\n",
      "Se posee un dataset que contiene datos de pel\u00edculas de cine, donde cada instancia es una pel\u00edcula y sus atributos son:\n",
      "\n",
      "- T\u00edtulo de la pel\u00edcula\n",
      "- G\u00e9nero\n",
      "- Director\n",
      "- Elenco\n",
      "- Argumento\n",
      "\n",
      "El objetivo de esta secci\u00f3n es poder predecir el g\u00e9nero de la pel\u00edcula en funci\u00f3n del resto de los atributos (t\u00edtulo, director, elenco y argumento)."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Importe el dataset desde el archivo *movies.csv* provisto junto con este notebook (recuerde las herramientas y t\u00e9cnicas utilizadas en el laboratorio parte 1):"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "import numpy as np\n",
      "import sklearn \n",
      "import pandas as pd\n",
      "import time\n",
      "# Start timing this section of the notebook\n",
      "start_time = time.clock()\n",
      "\n",
      "# The CSV has no header row; supply the column names explicitly\n",
      "data = pd.read_csv('movies.csv', header=None, names=['titulo', 'genero', 'director', 'elenco','argumento']) "
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 8
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Imprima las primeras 10 instancias del dataset:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Show the first 10 movies of the dataset\n",
      "data.head(10)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "html": [
        "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
        "<table border=\"1\" class=\"dataframe\">\n",
        "  <thead>\n",
        "    <tr style=\"text-align: right;\">\n",
        "      <th></th>\n",
        "      <th>titulo</th>\n",
        "      <th>genero</th>\n",
        "      <th>director</th>\n",
        "      <th>elenco</th>\n",
        "      <th>argumento</th>\n",
        "    </tr>\n",
        "  </thead>\n",
        "  <tbody>\n",
        "    <tr>\n",
        "      <th>0</th>\n",
        "      <td>                   'Til There Was You</td>\n",
        "      <td>                        Romance-Comedy</td>\n",
        "      <td>                  Scott Winant</td>\n",
        "      <td> John Plumpis, Janel Moloney, Yvonne Zima, Chri...</td>\n",
        "      <td> Gwen grows up with her romantic mother constan...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>1</th>\n",
        "      <td>                                    6</td>\n",
        "      <td>                         Romance-Drama</td>\n",
        "      <td>                 Theo van Gogh</td>\n",
        "      <td>                    Ariane Schluter, Ad van Kempen</td>\n",
        "      <td> Sarah, 30, single, well educated, likes art, p...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>2</th>\n",
        "      <td>                       101 Dalmatians</td>\n",
        "      <td>                      Adventure-Comedy</td>\n",
        "      <td>                 Stephen Herek</td>\n",
        "      <td> Glenn Close, Jeff Daniels, Joely Richardson, J...</td>\n",
        "      <td> Fashion designer Anita and computer-game write...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>3</th>\n",
        "      <td>                         12 Angry Men</td>\n",
        "      <td>                                 Drama</td>\n",
        "      <td>                  Sidney Lumet</td>\n",
        "      <td> Martin Balsam, John Fiedler, Lee J. Cobb, E.G....</td>\n",
        "      <td> The defense and the prosecution have rested an...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>4</th>\n",
        "      <td>                 2 Days in the Valley</td>\n",
        "      <td>                          Comedy-Crime</td>\n",
        "      <td>                 John Herzfeld</td>\n",
        "      <td> Danny Aiello, Greg Cruttwell, Jeff Daniels, Te...</td>\n",
        "      <td> John Herzfeld deftly welds together a multitud...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>5</th>\n",
        "      <td>          20000 Leagues Under the Sea</td>\n",
        "      <td> Adventure-Drama-Family-Fantasy-Sci-Fi</td>\n",
        "      <td>             Richard Fleischer</td>\n",
        "      <td> Kirk Douglas, James Mason, Paul Lukas, Peter L...</td>\n",
        "      <td> The oceans during the late 1860-92s are no lon...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>6</th>\n",
        "      <td>                2001: A Space Odyssey</td>\n",
        "      <td>                        Mystery-Sci-Fi</td>\n",
        "      <td>               Stanley Kubrick</td>\n",
        "      <td> Keir Dullea, Gary Lockwood, William Sylvester,...</td>\n",
        "      <td> \"2001\" is a story of evolution. Sometime in th...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>7</th>\n",
        "      <td>                       22 Jump Street</td>\n",
        "      <td>                   Action-Comedy-Crime</td>\n",
        "      <td> Phil Lord, Christopher Miller</td>\n",
        "      <td> Jonah Hill, Channing Tatum, Peter Stormare, Wy...</td>\n",
        "      <td> After making their way through high school (tw...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>8</th>\n",
        "      <td>                       22 Jump Street</td>\n",
        "      <td>                   Action-Comedy-Crime</td>\n",
        "      <td> Phil Lord, Christopher Miller</td>\n",
        "      <td> Jonah Hill, Channing Tatum, Peter Stormare, Wy...</td>\n",
        "      <td> After making their way through high school (tw...</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>9</th>\n",
        "      <td> 3 Ninjas: High Noon at Mega Mountain</td>\n",
        "      <td>        Action-Adventure-Comedy-Family</td>\n",
        "      <td>                 Sean McNamara</td>\n",
        "      <td> Hulk Hogan, Loni Anderson, Jim Varney, Mathew ...</td>\n",
        "      <td> Three young boys, Rocky, Colt and Tum Tum toge...</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
        "</table>\n",
        "</div>"
       ],
       "metadata": {},
       "output_type": "pyout",
       "prompt_number": 10,
       "text": [
        "                                 titulo  \\\n",
        "0                    'Til There Was You   \n",
        "1                                     6   \n",
        "2                        101 Dalmatians   \n",
        "3                          12 Angry Men   \n",
        "4                  2 Days in the Valley   \n",
        "5           20000 Leagues Under the Sea   \n",
        "6                 2001: A Space Odyssey   \n",
        "7                        22 Jump Street   \n",
        "8                        22 Jump Street   \n",
        "9  3 Ninjas: High Noon at Mega Mountain   \n",
        "\n",
        "                                  genero                       director  \\\n",
        "0                         Romance-Comedy                   Scott Winant   \n",
        "1                          Romance-Drama                  Theo van Gogh   \n",
        "2                       Adventure-Comedy                  Stephen Herek   \n",
        "3                                  Drama                   Sidney Lumet   \n",
        "4                           Comedy-Crime                  John Herzfeld   \n",
        "5  Adventure-Drama-Family-Fantasy-Sci-Fi              Richard Fleischer   \n",
        "6                         Mystery-Sci-Fi                Stanley Kubrick   \n",
        "7                    Action-Comedy-Crime  Phil Lord, Christopher Miller   \n",
        "8                    Action-Comedy-Crime  Phil Lord, Christopher Miller   \n",
        "9         Action-Adventure-Comedy-Family                  Sean McNamara   \n",
        "\n",
        "                                              elenco  \\\n",
        "0  John Plumpis, Janel Moloney, Yvonne Zima, Chri...   \n",
        "1                     Ariane Schluter, Ad van Kempen   \n",
        "2  Glenn Close, Jeff Daniels, Joely Richardson, J...   \n",
        "3  Martin Balsam, John Fiedler, Lee J. Cobb, E.G....   \n",
        "4  Danny Aiello, Greg Cruttwell, Jeff Daniels, Te...   \n",
        "5  Kirk Douglas, James Mason, Paul Lukas, Peter L...   \n",
        "6  Keir Dullea, Gary Lockwood, William Sylvester,...   \n",
        "7  Jonah Hill, Channing Tatum, Peter Stormare, Wy...   \n",
        "8  Jonah Hill, Channing Tatum, Peter Stormare, Wy...   \n",
        "9  Hulk Hogan, Loni Anderson, Jim Varney, Mathew ...   \n",
        "\n",
        "                                           argumento  \n",
        "0  Gwen grows up with her romantic mother constan...  \n",
        "1  Sarah, 30, single, well educated, likes art, p...  \n",
        "2  Fashion designer Anita and computer-game write...  \n",
        "3  The defense and the prosecution have rested an...  \n",
        "4  John Herzfeld deftly welds together a multitud...  \n",
        "5  The oceans during the late 1860-92s are no lon...  \n",
        "6  \"2001\" is a story of evolution. Sometime in th...  \n",
        "7  After making their way through high school (tw...  \n",
        "8  After making their way through high school (tw...  \n",
        "9  Three young boys, Rocky, Colt and Tum Tum toge...  "
       ]
      }
     ],
     "prompt_number": 10
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Entrene un clasificador que aprenda a predecir el g\u00e9nero de la pel\u00edcula utilizando \u00fanicamente el atributo \"argumento\". Para esto es necesario primero transformar el atributo de texto a atributos num\u00e9ricos. El paquete *sklearn.feature_extraction.text* contiene clases que permiten transformar atributos de texto en num\u00e9ricos, realice esa transformaci\u00f3n:"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**COMENTARIO** Utilizaremos las funciones CountVectorizer y TfidfTransformer; de esta forma transformamos las palabras en n\u00fameros, en este caso la frecuencia con la que aparece una palabra. Adem\u00e1s utilizamos un pipeline para hacer m\u00e1s sencilla la implementaci\u00f3n del clasificador."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Use only the plot summary ('argumento') as the input text\n",
      "X = data['argumento'];\n",
      "\n",
      "# The target to predict is the movie genre\n",
      "y = data.genero\n",
      "\n",
      "# Replace 'Sci-Fi' with 'SciFi' so the '-' split does not break it apart,\n",
      "# then split each genre string into a list of labels (multi-label target)\n",
      "y_ml = [ it.replace('Sci-Fi', 'SciFi').split('-') for it in y]\n",
      "    \n",
      "# Binarize the label lists into a multi-label indicator matrix\n",
      "from sklearn.preprocessing import MultiLabelBinarizer\n",
      "mlb = MultiLabelBinarizer()\n",
      "y_ml = mlb.fit_transform(y_ml)\n",
      "\n",
      "print mlb.classes_\n",
      "# Remove the target column from the feature frame\n",
      "del data['genero']"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "['Action' 'Adventure' 'Animation' 'Biography' 'Comedy' 'Crime'\n",
        " 'Documentary' 'Drama' 'Family' 'Fantasy' 'Film' 'History' 'Horror' 'Music'\n",
        " 'Musical' 'Mystery' 'Noir' 'Romance' 'SciFi' 'Short' 'Show' 'Sport' 'Talk'\n",
        " 'Thriller' 'War' 'Western']\n"
       ]
      }
     ],
     "prompt_number": 11
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Elija dos algoritmos de aprendizaje autom\u00e1tico, entrene ambos modelos e intente obtener los mejores resultados posibles. Tener en cuenta que una pel\u00edcula puede tener m\u00e1s de un genero, por lo cual el clasificador debe de poder devolver m\u00e1s de una etiqueta. Puede ver algunas referencias en la documentacion de [scikit-learn](http://scikit-learn.org/stable/modules/multiclass.html):"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "\n",
      "**COMENTARIO** Elegimos LinearSVC que es popular en clasificaci\u00f3n de texto y MultinomialNB que es recomendado para clasificaci\u00f3n de texto y conteo de palabras[5].\n",
      " "
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Split into training and test sets (25% held out, fixed seed)\n",
      "from sklearn.cross_validation import train_test_split\n",
      "X_train, X_test, y_train, y_test = train_test_split(X, y_ml, test_size=0.25, random_state=0)\n",
      "\n",
      "from sklearn.feature_extraction.text import CountVectorizer\n",
      "from sklearn.feature_extraction.text import TfidfTransformer\n",
      "from sklearn.pipeline import Pipeline\n",
      "from sklearn.naive_bayes import  MultinomialNB\n",
      "from sklearn.multiclass import OneVsRestClassifier,OneVsOneClassifier\n",
      "from sklearn.svm import LinearSVC\n",
      "\n",
      "# Text pipelines: bag-of-words counts -> tf-idf weighting -> one-vs-rest\n",
      "# multi-label classifier (LinearSVC and MultinomialNB variants)\n",
      "classifier_SVC = Pipeline([\n",
      "  ('vect', CountVectorizer(stop_words='english')),\n",
      "  ('tfidf', TfidfTransformer()),\n",
      "  ('clf', OneVsRestClassifier(LinearSVC(random_state=0)))])\n",
      "\n",
      "classifier_NB = Pipeline([\n",
      "  ('vect', CountVectorizer(stop_words='english')),\n",
      "  ('tfidf', TfidfTransformer()),\n",
      "  ('clf', OneVsRestClassifier(MultinomialNB()))])\n",
      "\n",
      "# Baseline: raw counts (top 100 words) with a one-vs-rest MultinomialNB\n",
      "argumento_vect = CountVectorizer(max_features=100)\n",
      "argumento_counts = argumento_vect.fit_transform(X_train)\n",
      "clf = OneVsRestClassifier(MultinomialNB()).fit(argumento_counts, y_train)\n",
      "\n",
      "X_new_counts = argumento_vect.transform(X_test)\n",
      "predicted = clf.predict(X_new_counts)\n"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 12
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Imprima los mejores resultados de precision, recall y accuracy para los algoritmos seleccionados:"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "** COMENTARIO ** Para poder encontrar los mejores resultados vamos a utilizar un \"grid search\" para ajustar los par\u00e1metros \u00f3ptimamente."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "from sklearn.metrics import classification_report,f1_score,precision_score,accuracy_score, make_scorer, recall_score,precision_recall_fscore_support\n",
      "from sklearn.grid_search import GridSearchCV\n",
      "from sklearn.linear_model import SGDClassifier\n",
      "\n",
      "def get_best_parameters (classifier, name) :\n",
      "    parameters =  {'vect__ngram_range': [(1, 1), (2, 2)],\n",
      "                   'vect__max_df': [0.5, 1.0],\n",
      "                   'vect__min_df': [1,3],\n",
      "                   'vect__max_features': [200,500],\n",
      "    }\n",
      "    \n",
      "    gs_clf = GridSearchCV(\n",
      "                    classifier,\n",
      "                    parameters,\n",
      "                    scoring=make_scorer(recall_score)\n",
      "    )\n",
      "    #recorremos un espacio chico para acelerar la busqueda\n",
      "    gs_clf = gs_clf.fit(X_train[:100],(y_train[:100]))\n",
      "\n",
      "    print \"\"\n",
      "    print \"MEJORES PARAMETROS PARA \"+name\n",
      "    print \"\"        \n",
      "        \n",
      "    best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])\n",
      "    for param_name in sorted(parameters.keys()):\n",
      "         print(\"%s: %r\" % (param_name, best_parameters[param_name]))\n",
      "\n",
      "\n",
      "get_best_parameters(classifier_NB, \"NB\")\n",
      "\n",
      "classifier_NB = Pipeline([\n",
      "  ('vect', CountVectorizer(stop_words='english', max_features= 500, max_df= 0.5,min_df= 1,ngram_range= (1, 1) )),\n",
      "  ('tfidf', TfidfTransformer()),\n",
      "  ('clf', OneVsRestClassifier(MultinomialNB()))])\n",
      "classifier_NB.fit(X_train, y_train)\n",
      "predicted_NB = classifier_NB.predict(X_test)\n",
      "\n",
      "print\n",
      "print\n",
      "print \"*\"\n",
      "print \"REPORTE DEL NB CON LOS MEJORES PARAMETROS\"\n",
      "print \"*\"\n",
      "print(classification_report(y_test, predicted_NB,mlb.classes_))\n",
      "print accuracy_score(y_test, predicted_NB)\n",
      "print \"------------------------------------------------------ \\n\"\n",
      "\n",
      "get_best_parameters(classifier_SVC, \"SVC\")\n",
      "\n",
      "\n",
      "classifier_SVC = Pipeline([\n",
      "  ('vect', CountVectorizer(stop_words='english',max_features= 200,max_df= 0.5,min_df= 3,ngram_range= (1, 1) )),\n",
      "  ('tfidf', TfidfTransformer()),\n",
      "  ('clf', OneVsRestClassifier(LinearSVC(random_state=0)))])\n",
      "classifier_SVC.fit(X_train, y_train)\n",
      "predicted_SVC = classifier_SVC.predict(X_test)\n",
      "\n",
      "print \"*\"\n",
      "print \"REPORTE DEL SVC CON LOS MEJORES PARAMETROS\"\n",
      "print \"*\"\n",
      "print(classification_report(y_test, predicted_SVC,mlb.classes_))\n",
      "print accuracy_score(y_test, predicted_SVC)\n",
      "print \"------------------------------------------------------ \\n\"\n"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stderr",
       "text": [
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 6 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 10 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 16 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 20 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 22 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\metrics.py:1773: UndefinedMetricWarning: Recall is ill-defined and being set to 0.0 in labels with no true samples.\n",
        "  'recall', 'true', average, warn_for)\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 11 is present in all training examples.\n",
        "  str(classes[c]))\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stderr",
       "text": [
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 14 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 24 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 19 is present in all training examples.\n",
        "  str(classes[c]))\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "MEJORES PARAMETROS PARA NB\n",
        "\n",
        "vect__max_df: 0.5\n",
        "vect__max_features: 500\n",
        "vect__min_df: 1\n",
        "vect__ngram_range: (1, 1)\n",
        "\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stderr",
       "text": [
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\multiclass.py:57: UserWarning: Label not 21 is present in all training examples.\n",
        "  str(classes[c]))\n",
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\metrics.py:1771: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in labels with no predicted samples.\n",
        "  'precision', 'predicted', average, warn_for)\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "*\n",
        "REPORTE DEL NB CON LOS MEJORES PARAMETROS\n",
        "*\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "     Action       0.57      0.14      0.22        59\n",
        "  Adventure       0.75      0.06      0.11        52\n",
        "  Animation       0.00      0.00      0.00         9\n",
        "  Biography       0.00      0.00      0.00        13\n",
        "     Comedy       0.69      0.40      0.50       136\n",
        "      Crime       1.00      0.18      0.31        71\n",
        "Documentary       0.00      0.00      0.00         7\n",
        "      Drama       0.64      0.84      0.72       199\n",
        "     Family       0.00      0.00      0.00        40\n",
        "    Fantasy       0.00      0.00      0.00        29\n",
        "       Film       0.00      0.00      0.00         2\n",
        "    History       0.00      0.00      0.00        10\n",
        "     Horror       0.00      0.00      0.00        20\n",
        "      Music       0.00      0.00      0.00        12\n",
        "    Musical       0.00      0.00      0.00        12\n",
        "    Mystery       0.00      0.00      0.00        27\n",
        "       Noir       0.00      0.00      0.00         2\n",
        "    Romance       1.00      0.07      0.12        91\n",
        "      SciFi       1.00      0.22      0.36        41\n",
        "      Short       0.00      0.00      0.00         3\n",
        "       Show       0.00      0.00      0.00         0\n",
        "      Sport       0.00      0.00      0.00         9\n",
        "       Talk       0.00      0.00      0.00         0\n",
        "   Thriller       0.58      0.28      0.38        68\n",
        "        War       0.00      0.00      0.00        13\n",
        "    Western       0.00      0.00      0.00         8\n",
        "\n",
        "avg / total       0.57      0.30      0.33       933\n",
        "\n",
        "0.118155619597\n",
        "------------------------------------------------------ \n",
        "\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "MEJORES PARAMETROS PARA SVC\n",
        "\n",
        "vect__max_df: 0.5\n",
        "vect__max_features: 200\n",
        "vect__min_df: 3\n",
        "vect__ngram_range: (1, 1)\n",
        "*"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "REPORTE DEL SVC CON LOS MEJORES PARAMETROS\n",
        "*\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "     Action       0.46      0.27      0.34        59\n",
        "  Adventure       0.50      0.23      0.32        52\n",
        "  Animation       0.00      0.00      0.00         9\n",
        "  Biography       0.20      0.08      0.11        13\n",
        "     Comedy       0.58      0.51      0.54       136\n",
        "      Crime       0.64      0.38      0.48        71\n",
        "Documentary       1.00      0.14      0.25         7\n",
        "      Drama       0.67      0.73      0.70       199\n",
        "     Family       0.42      0.20      0.27        40\n",
        "    Fantasy       0.33      0.14      0.20        29\n",
        "       Film       0.00      0.00      0.00         2\n",
        "    History       0.00      0.00      0.00        10\n",
        "     Horror       0.62      0.25      0.36        20\n",
        "      Music       0.00      0.00      0.00        12\n",
        "    Musical       0.00      0.00      0.00        12\n",
        "    Mystery       0.46      0.22      0.30        27\n",
        "       Noir       0.00      0.00      0.00         2\n",
        "    Romance       0.63      0.35      0.45        91\n",
        "      SciFi       0.93      0.32      0.47        41\n",
        "      Short       0.00      0.00      0.00         3\n",
        "       Show       0.00      0.00      0.00         0\n",
        "      Sport       0.50      0.22      0.31         9\n",
        "       Talk       0.00      0.00      0.00         0\n",
        "   Thriller       0.46      0.44      0.45        68\n",
        "        War       0.00      0.00      0.00        13\n",
        "    Western       0.00      0.00      0.00         8\n",
        "\n",
        "avg / total       0.54      0.40      0.44       933\n",
        "\n",
        "0.123919308357\n",
        "------------------------------------------------------ \n",
        "\n"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stderr",
       "text": [
        "C:\\Users\\Tonga\\Anaconda\\lib\\site-packages\\sklearn\\metrics\\metrics.py:1773: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in labels with no true samples.\n",
        "  'recall', 'true', average, warn_for)\n"
       ]
      }
     ],
     "prompt_number": 13
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Entrene nuevamente ambos algoritmos, pero ahora utilice todos los atributos disponibles (t\u00edtulo, director, elenco y argumento). Intente obtener los mejores resultados posibles. Tenga en cuenta que algunos de los atributos puede tener los valores incompletos para alguanas instancias:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "\n",
      "from scipy.sparse import hstack\n",
      "\n",
      "#Ponemos (2,2) un biagrama por que los nombres de los actores estan como nombre y apellido. Asi que son 2 palabras\n",
      "\n",
      "count_vect = CountVectorizer(stop_words='english', max_features= 500)\n",
      "titulo_counts = count_vect.fit_transform(data['titulo'])\n",
      "\n",
      "count_vect = CountVectorizer(stop_words='english', max_features= 500, ngram_range= (2, 2))\n",
      "director_counts = count_vect.fit_transform(data['director'])\n",
      "\n",
      "count_vect = CountVectorizer(stop_words='english', max_features= 500,ngram_range= (2, 2))\n",
      "elenco_counts = count_vect.fit_transform(data['elenco'])\n",
      "\n",
      "count_vect = CountVectorizer(stop_words='english', max_features= 500)\n",
      "argumento_counts = count_vect.fit_transform(data['argumento'])\n",
      "\n",
      "\n",
      "# Creo el arreglo vacio. La idea es que en cada fila esten los campos de cada uno de los counts generados\n",
      "# La cantidad de lineas se mantiene.\n",
      "size1 = titulo_counts.shape[0]\n",
      "size2 = titulo_counts.shape[1] + director_counts.shape[1] + elenco_counts.shape[1] + argumento_counts.shape[1]\n",
      "resul = np.empty((size1,size2))\n",
      "\n",
      "# Completo la matriz general a partir de los counts de cada campo\n",
      "for i in range (0,elenco_counts.shape[0]):\n",
      "    indice = 0\n",
      "    for j in range (0,titulo_counts.shape[1]):            \n",
      "        resul[i,indice] = titulo_counts[i,j]\n",
      "        if np.isnan(resul[i,indice]):\n",
      "            resul[i,indice] = 0\n",
      "        indice = indice + 1\n",
      "\n",
      "    for j in range (0,director_counts.shape[1]):            \n",
      "        resul[i,indice] = director_counts[i,j]\n",
      "        if np.isnan(resul[i,indice]):\n",
      "            resul[i,indice] = 0\n",
      "        indice = indice + 1\n",
      "        \n",
      "    for j in range (0,elenco_counts.shape[1]):            \n",
      "        resul[i,indice] = elenco_counts[i,j]\n",
      "        if np.isnan(resul[i,indice]):\n",
      "            resul[i,indice] = 0\n",
      "        indice = indice + 1\n",
      "        \n",
      "    for j in range (0,argumento_counts.shape[1]):            \n",
      "        resul[i,indice] = argumento_counts[i,j]\n",
      "        if np.isnan(resul[i,indice]):\n",
      "            resul[i,indice] = 0\n",
      "        indice = indice + 1"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [],
     "prompt_number": 14
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**PREGUNTA: Justifique las transformaciones que realiz\u00f3 a cada uno de los atributos.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**RESPUESTA:**\n",
      "\n",
      "En este ejercicio probamos distintos m\u00e9todos algunos que no funcionaron y otros que tuvieron baja performance. La idea es pasar los atributos de texto a atributos num\u00e9ricos que el clasificador comprenda. Aqu\u00ed un resumen:\n",
      "\n",
      "**M\u00c9TODO 1:** Utilizar el countVectorizer para cada atributo t\u00edtulo, director, elenco, argumento) como una columna de una matriz y pasarla al clasificador.\n",
      "Este m\u00e9todo era el m\u00e1s directo pero no funcion\u00f3 por problemas con las dimensiones de las columnas y matrices y lo que espera la funci\u00f3n fit del clasificador.\n",
      "Aunque realizamos varias conversiones no logramos que funcionara.\n",
      "\n",
      "**M\u00c9TODO 2:** Concatenar todos los atributos en uno solo y luego hacer CountVectorizer, esto funcion\u00f3 pero la performance era muy baja, nuestra conclusi\u00f3n es que\n",
      "las palabras del argumento dominaban a las dem\u00e1s y estas no tienen peso suficiente para mejorarlo. Es decir las palabras que m\u00e1s aparecen son las de argumento, y no los nombres del director por ejemplo.\n",
      "\n",
      "** M\u00c9TODO 3:** Usamos una t\u00e9cnica similar a la del lab1, es decir pensar el nombre del director o del actor como una categor\u00eda, y luego utilizando labelencoder pasarla a un atributo num\u00e9rico. Hicimos la prueba solo para elenco y director. El recall que dio fue similar a lo que da la parte anterior que pide solo con argumento, alrededor de 30%. Esto suponemos quiere decir que podemos predecir el g\u00e9nero de una pel\u00edcula sabiendo el director y elenco).\n",
      "Esta t\u00e9cnica consume muchos recursos ya que son muchos actores y directores, y por cada uno crea una columna nueva. No logramos concatenar diferentes tipo de features en el clasificador, por ejemplo este m\u00e9todo junto con el \"bag of words\" (CountVectorizer) aunque probamos algunas t\u00e9cnicas como FeatureUnion.\n",
      "\n",
      "**M\u00c9TODO 4:** \u00c9ste es el actual algoritmo que usamos, utilizamos el countVectorizer para cada atributo y luego realizamos el procesamiento en una matriz. La idea es que en cada fila est\u00e9n los campos de cada uno de los counts generados. Para los casos en que el resultado del count en una celda de la matriz es NaN, se sustituye por un cero en ese lugar.\n",
      "\n",
      "**ELECCI\u00d3N DE ATRIBUTOS ** Dado que es un proceso intenso, no realizamos un Grid-search para buscar los atributos \u00f3ptimos sino que probamos varios manualmente hasta dar con una buena combinaci\u00f3n, bas\u00e1ndonos en los siguientes argumentos:\n",
      "\n",
      "* max_features: La cantidad ideal es alrededor de 500, que tiene mejor rendimiento que 200. Para valores m\u00e1s altos ya no se nota tanto la diferencia y adem\u00e1s es m\u00e1s lento.\n",
      "\n",
      "* ngram_range: Tanto para director como para elenco encontramos que la cantidad de palabras ideal es 2. Esto es porque el nombre del director y actor viene dado por dos palabras, el nombre y el apellido. El t\u00edtulo y argumento funciona mejor con una palabra.\n",
      "\n",
      "* max_df y min_df: Utilizamos los valores por defecto ya que para acotar la cantidad de features ya tenemos al max_features.\n",
      "\n",
      "* stop_words: Muy importante utilizar esto, sobre todo en argumento para eliminar palabras que se repiten mucho."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "Imprima los mejores resultados de precision, recall y accuracy para los algoritmos seleccionados:"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "X_train, X_test, y_train, y_test = train_test_split(resul, y_ml, test_size=0.25, random_state=0)\n",
      "\n",
      "classifier_NB = Pipeline([\n",
      "  ('clf', OneVsRestClassifier(MultinomialNB()))])\n",
      "classifier_NB.fit(X_train, y_train)\n",
      "classifier_NB = classifier_NB.predict(X_test)\n",
      "\n",
      "print \"REPORTE DEL NB CON LOS MEJORES PARAMETROS\"\n",
      "print(classification_report(y_test, classifier_NB,mlb.classes_))\n",
      "print accuracy_score(y_test, classifier_NB)\n",
      "\n",
      "presicion, recall, f1, support = precision_recall_fscore_support(y_test,classifier_NB,1,mlb.classes_ )\n",
      "\n",
      "classifier_SVC = Pipeline([\n",
      "  ('clf', OneVsRestClassifier(LinearSVC(random_state=0)))])\n",
      "classifier_SVC.fit(X_train, y_train)\n",
      "predicted_SVC = classifier_SVC.predict(X_test)\n",
      "\n",
      "print \"REPORTE DEL SVC CON LOS MEJORES PARAMETROS\"\n",
      "print(classification_report(y_test, predicted_SVC,mlb.classes_))\n",
      "print accuracy_score(y_test, predicted_SVC)\n",
      "\n",
      "print \"\"\n",
      "print \"\"\n",
      "print(\"--- %s Tiempo ejecucion, segundos ---\" % ((time.clock() - start_time)))"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "REPORTE DEL NB CON LOS MEJORES PARAMETROS\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "     Action       0.49      0.58      0.53        59\n",
        "  Adventure       0.49      0.48      0.49        52\n",
        "  Animation       0.18      0.22      0.20         9\n",
        "  Biography       0.19      0.31      0.24        13\n",
        "     Comedy       0.64      0.68      0.66       136\n",
        "      Crime       0.61      0.66      0.64        71\n",
        "Documentary       0.25      0.14      0.18         7\n",
        "      Drama       0.70      0.80      0.74       199\n",
        "     Family       0.38      0.33      0.35        40\n",
        "    Fantasy       0.31      0.28      0.29        29\n",
        "       Film       0.00      0.00      0.00         2\n",
        "    History       0.10      0.10      0.10        10\n",
        "     Horror       0.19      0.15      0.17        20\n",
        "      Music       0.12      0.08      0.10        12\n",
        "    Musical       0.17      0.17      0.17        12\n",
        "    Mystery       0.34      0.44      0.39        27\n",
        "       Noir       0.00      0.00      0.00         2\n",
        "    Romance       0.49      0.62      0.54        91\n",
        "      SciFi       0.69      0.61      0.65        41\n",
        "      Short       0.00      0.00      0.00         3\n",
        "       Show       0.00      0.00      0.00         0\n",
        "      Sport       0.12      0.11      0.12         9\n",
        "       Talk       0.00      0.00      0.00         0\n",
        "   Thriller       0.42      0.62      0.50        68\n",
        "        War       0.17      0.15      0.16        13\n",
        "    Western       0.09      0.12      0.11         8\n",
        "\n",
        "avg / total       0.51      0.57      0.54       933\n",
        "\n",
        "0.115273775216\n",
        "REPORTE DEL SVC CON LOS MEJORES PARAMETROS"
       ]
      },
      {
       "output_type": "stream",
       "stream": "stdout",
       "text": [
        "\n",
        "             precision    recall  f1-score   support\n",
        "\n",
        "     Action       0.42      0.36      0.39        59\n",
        "  Adventure       0.38      0.27      0.31        52\n",
        "  Animation       0.25      0.22      0.24         9\n",
        "  Biography       0.11      0.15      0.13        13\n",
        "     Comedy       0.53      0.56      0.54       136\n",
        "      Crime       0.48      0.42      0.45        71\n",
        "Documentary       0.00      0.00      0.00         7\n",
        "      Drama       0.67      0.66      0.66       199\n",
        "     Family       0.48      0.35      0.41        40\n",
        "    Fantasy       0.39      0.31      0.35        29\n",
        "       Film       1.00      0.50      0.67         2\n",
        "    History       0.25      0.20      0.22        10\n",
        "     Horror       0.37      0.35      0.36        20\n",
        "      Music       0.20      0.17      0.18        12\n",
        "    Musical       0.38      0.25      0.30        12\n",
        "    Mystery       0.32      0.26      0.29        27\n",
        "       Noir       1.00      0.50      0.67         2\n",
        "    Romance       0.49      0.41      0.45        91\n",
        "      SciFi       0.70      0.46      0.56        41\n",
        "      Short       0.50      0.33      0.40         3\n",
        "       Show       0.00      0.00      0.00         0\n",
        "      Sport       0.17      0.11      0.13         9\n",
        "       Talk       0.00      0.00      0.00         0\n",
        "   Thriller       0.35      0.46      0.40        68\n",
        "        War       0.43      0.23      0.30        13\n",
        "    Western       0.00      0.00      0.00         8\n",
        "\n",
        "avg / total       0.49      0.44      0.46       933\n",
        "\n",
        "0.121037463977\n",
        "\n",
        "\n",
        "--- 419.438295753 Tiempo ejecucion, segundos ---\n"
       ]
      }
     ],
     "prompt_number": 15
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**PREGUNTA: Con cu\u00e1l de los dos conjuntos de atributos de partida (solo argumento o t\u00edtulo, director elenco y argumento) obtuvo los mejores resultados? Analice los resultados obtenidos.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**RESPUESTA:**\n",
      "\n",
      "Un n\u00famero importante a tener en cuenta en la comparaci\u00f3n es el support que nos indica el n\u00famero de ocurrencias de cada instancia en el conjunto. Con esto descubrimos que hay algunos g\u00e9neros que aparecen muy pocas veces o ninguna vez, esto producir\u00e1 que el entrenamiento sea deficiente. Por ejemplo los que tienen menos de 5 en support son: Film, Noir, Short, Show y Talk que por tanto van a tener un valor muy bajo o nulo de precisi\u00f3n o recall.\n",
      "\n",
      "Esto mejora cuando se agregan los dem\u00e1s atributos logrando buenos score de recall aun para casos en donde hay poco support.\n",
      "\n",
      "\n",
      "\n",
      "Opci\u00f3n 1 se refiere al caso en donde solo usamos el argumento como atributo.\n",
      "Opci\u00f3n 2 el caso en que usamos todos los atributos\n",
      "\n",
      "**COMPARACI\u00d3N MultinomialNB **\n",
      "\n",
      "<table>\n",
      "    <tr>\n",
      "        <td></td> <td>Opcion 1</td> <td> Opcion 2</td>\n",
      "   </tr>\n",
      "    <tr>\n",
      "        <td>Precision</td> <td>0.57</td> <td> 0.51</td>\n",
      "    </tr>  \n",
      "   <tr>\n",
      "        <td>Recall</td> <td> 0.30</td> <td> 0.57</td>\n",
      "    </tr>\n",
      "   <tr>\n",
      "        <td>f1-score</td> <td> 0.33</td> <td> 0.54</td>\n",
      "    </tr>    \n",
      "   <tr>\n",
      "        <td>Exactitud</td> <td>0.11</td> <td>0.11</td>\n",
      "   </tr>\n",
      "</table>\n",
      "\n",
      "La exactitud es similar en ambas opciones, pero la opci\u00f3n 2 saca ventaja tanto en recall como en f1-score. Adem\u00e1s analizando el resultado para cada g\u00e9nero se logra un aumento del recall en casi todos los g\u00e9neros, aun en aquellos que ten\u00edan 0.\n",
      "Por tanto claramente tener m\u00e1s atributos genera mejor resultado.\n",
      "\n",
      "**COMPARACION LinearSVC **\n",
      "\n",
      "<table>\n",
      "    <tr>\n",
      "        <td></td> <td>Opcion 1</td> <td> Opcion 2</td>\n",
      "   </tr>\n",
      "    <tr>\n",
      "        <td>Precision</td> <td>0.54</td> <td> 0.49</td>\n",
      "    </tr>  \n",
      "   <tr>\n",
      "        <td>Recall</td> <td> 0.40</td> <td> 0.44</td>\n",
      "    </tr>\n",
      "   <tr>\n",
      "        <td>f1-score</td> <td> 0.44</td> <td> 0.46</td>\n",
      "    </tr>    \n",
      "   <tr>\n",
      "        <td>Exactitud</td> <td>0.12</td> <td>0.12</td>\n",
      "   </tr>\n",
      "</table>\n",
      "\n",
      "Otra vez la exactitud es similar, y se mejora el recall y f1-score pero no en un porcentaje tan grande como en el caso anterior.\n",
      "Analizando cada g\u00e9nero vemos que tambi\u00e9n se mejor\u00f3 el recall para cada g\u00e9nero aun cuando el anterior era 0.\n",
      "\n",
      "Hay que tener en cuenta que estos promedios no tienen en cuenta los 0, si as\u00ed fuera la diferencia ser\u00eda mucho m\u00e1s apreciable en favor de la opci\u00f3n 2."
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**PREGUNTA: Escriba las conclusiones generales que haya obtenido de la tarea.**"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "**RESPUESTA:**\n",
      "\n",
      "**Clasificaci\u00f3n Texto**\n",
      "\n",
      "\n",
      "Este ejercicio presenta ciertas caracter\u00edsticas que lo hacen complejo, por ejemplo tiene m\u00faltiples etiquetas por lo que hay que procesar de forma distinta, tambi\u00e9n tenemos que para algunos g\u00e9neros no existen suficiente n\u00famero de instancias lo que repercute en el entrenamiento, y por \u00faltimo para la combinaci\u00f3n de varios atributos tuvimos que probar diferentes m\u00e9todos.\n",
      "\n",
      "Esto hace que los valores promedio no sean muy buenos, pero mirando m\u00e1s al detalle por cada g\u00e9nero tenemos que se logra buena performance para alguno de ellos por ejemplo Drama con un 80% y Comedia con un 68% de recall que coincide que son los generos con mayor n\u00famero de support (cantidad de instancias que tienen esa etiqueta), de m\u00e1s de 100.\n",
      "\n",
      "En la parte de clasificacion de imagen traemos solo aquellos que tienen por lo menos 50 im\u00e1genes de su rostro, esta t\u00e9cnica es interesante y se podr\u00eda haber aplicado al clasificador de texto ya que uno de sus problemas es el escaso (o nulo) n\u00famero de pel\u00edculas para algun g\u00e9nero en particular, que en alg\u00fan caso genera warnings porque no existen datos para el g\u00e9nero y no puede predecir.\n",
      "\n",
      "Por tanto cuando se tiene una buena cantidad de datos para realizar el entrenamiento la t\u00e9cnica de extracci\u00f3n de atributos basado en la cantidad de palabras parece brindar buenos resultados. Creemos que si tuvi\u00e9ramos m\u00e1s instancias de cada g\u00e9nero el resultado global ser\u00eda mejor.\n",
      "\n",
      "Para probar este punto en la siguiente celda mostramos una gr\u00e1fica con la tendencia. Se aprecia claramente que al aumentar el support aumenta el recall.\n"
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "\n",
      "x= support\n",
      "y= recall\n",
      "fit = np.polyfit(x,y,1)\n",
      "fit_fn = np.poly1d(fit) \n",
      "\n",
      "pl.plot(x,y,'bo', x, fit_fn(x), '--k')\n",
      "pl.xlabel('Support')\n",
      "pl.ylabel('Recall')\n"
     ],
     "language": "python",
     "metadata": {},
     "outputs": [
      {
       "metadata": {},
       "output_type": "pyout",
       "prompt_number": 16,
       "text": [
        "<matplotlib.text.Text at 0x1928f550>"
       ]
      },
      {
       "metadata": {},
       "output_type": "display_data",
       "png": "iVBORw0KGgoAAAANSUhEUgAAAYcAAAEPCAYAAACp/QjLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xt8VOX17/HPItxFRESsBdQK/KxYLdaKtlqIokChSi1W\niuXUWls9Vi7aY1tR1Cil2vbUaqiIUu+lgsUbJR4MB0xDPd5QBC9oJZS7BER+qBEiJOv8MZNhksll\nSGZnz+X7fr3mRZ49e/YsNsOsPPt5nrXN3REREYnXJuwAREQk/Sg5iIhIAiUHERFJoOQgIiIJlBxE\nRCSBkoOIiCQINDmY2QNmVm5mbzbw/A/NbKWZrTKzF8zspCDjERGR5ATdc3gQGNHI82uBwe5+EjAN\nuC/geEREJAmBJgd3XwbsbOT5F919V7T5MtA7yHhERCQ56TTmcBnwbNhBiIgItA07AAAzOwv4CXBG\n2LGIiEgaJIfoIPRsYIS713sJysxUAEpEpBnc3ZrzulAvK5nZUcCTwHh3X9PYvu6uR4oeN998c+gx\nZMtD51LnM50fLRFoz8HMHgOGAD3MbCNwM9AOwN3vBW4CDgXuMTOAve4+KMiYRESkaYEmB3cf18Tz\nPwV+GmQMIiJy4NJptpK0kvz8/LBDyBo6l6ml85k+rKXXpVqDmXkmxCkikk7MDM/EAWkREUlPSg4i\nIpJAyUFERBIoOYiISAIlBxERSaDkICIiCZQcREQkgZKDiIgkUHIQEZEESg4iIpJAyUFERBIoOYiI\nSAIlBxERSaDkICIiCZQcREQkgZKDiIgkUHIQEZEESg4iIpJAyUFERBIoOYiISAIlBxGRLPHoo49i\nZtx2220tPpa5ewpCCpaZeSbEKSISFjMD4Etf+hKvv/463bp1w8xwd2vO8dRzEBHJAgsWLGDr1q2s\nXbuWbt26tfh46jmIiGQp9RxERHLA6tWrMTM2b94c+Hu1DfwdRESkRdavX88xxxwTa+/evTvw9wy0\n52BmD5hZuZm92cg+hWb2vpmtNLOTg4xHRCSTlJeX07Vr11hiKCkpwd3p169f4O8d9GWlB4ERDT1p\nZiOBfu7eH7gcuCfgeEREMsYjjzzCJ598wjPPPIO7M2TIkFZ778AHpM3sGOAf7n5iPc/NAp5393nR\n9rvAEHcvr7OfBqRFRA5QJg9I9wI2xrU3Ab1DikVEJBSff/4527ZtCzuMWsJODgB1s5q6CCKSE6qq\nqhg7diwdOnTglFNOCTucWsKerbQZ6BPX7h3dlqCgoCD2c35+Pvn5+UHGJSISGHdnwoQJzJw5E4DJ\nkyfzpz/9qcXHLSkpoaSkpMXHgfDHHEYCE9x9pJmdDtzp7qfXs5/GHEQkK1RUVNClSxcAxo8fz8MP\nP0ybNsFcxGnJmEOgPQczewwYAvQws43AzUA7AHe/192fNbORZrYGqAAuDTIeEZGwHXTQQdx+++38\n4he/oF27dmGH0yCVzxARyVKZPFtJRCQrPf300xx//PFhh9FsYQ9Ii4hklaVLlzJ06FAADj/8cNw9\nVk47kyg5iIikwKuvvsqgQYNi7Q0bNtCnT59GXpHeNOYgIpICNb2Dd999l+OOOy7kaCLSdraSiEiu\nyLZfYDUgLSJyAHbu3Bl2CK1CyUFEJAk7d+6kT58+dO/enQULFoQdTuCUHEREGlFRUcHAgQPp3r07\nmzZtYs6cOZx//vlhhxU4JQcRkQY8+uijdOnShZUrV3L33Xfj7lx88cVhh9UqNFtJRKQBW7Zs4YEH\nHmDq1Klhh9IsLZmtpOQgIpKlVD5DRKQFpk6dyrRp08IOI60oOYhIzrrjjjswM6ZPn8727dvDDiet\naBGciOScBx54gMsuuwyAb3zjGyxZsoROnTqFHFV60ZiDiOSUvXv30r59e4477jheeeUVunbtGnZI\ngdGAtIjIAcjUSqkHSgPSIiL12L17d73bcyE
xtJSSg4hknbfeegszo3PnzlRXV4cdTkbSgLSIZI21\na9fSt2/fWHvFihW0aaPfgZtDZ01EskJ+fn4sMSxbtgx3Z+DAgSFHlbk0IC0iWWHWrFkcddRRjBw5\nMuxQ0oZmK4mISALNVhKRnFBZWcnQoUPZuHFj2KFkPSUHEUl7VVVVjBkzho4dO7J06VLee++9sEPK\nekoOIpK23J3LL7+ctm3b8uSTT3LttddSXV3NOeecE3ZoWU9TWUUkbT344IPMnj2bn/zkJ8yePVvT\nUluRBqRFJK1VV1crKTSTBqRFJOPt27ev3u1KDOHQWReRUM2fPx8zo127dmGHInECTQ5mNsLM3jWz\n983s1/U838PMFpnZG2b2lpn9OMh4RCR9FBcXY2Z8//vfp2fPnrrZTpoJbEDazPKAPwPnAJuBV81s\ngbuvjtttArDC3aeYWQ/gPTP7q7vX378UkYxXXV1NXl4eAHl5eaxfv55evXqFHFV2KCoqpbCwmMrK\ntnTo0LKv0SBnKw0C1rj7OgAzmwuMBuKTwwfASdGfuwI7lBhEslubNm0YM2YMt99+O/369Qs7nKxR\nVFTK5MnPUVY2PW7r9Ab3b0qQyaEXEL+McRNwWp19ZgNLzWwLcDBwUYDxiEiamD9/ftghZJ3CwuI6\niaFlgkwOycw9vR54w93zzawvsNjMvurun9TdsaCgIPZzfn4++fn5qYpTRAKwZcsWfvnLXzJnzpyw\nQ8kJlZVtgZLoo+WCTA6bgT5x7T5Eeg/xvkm03+PuZWb2H+A4YHndg8UnBxFJXzt27OCEE06gvLwc\ngIceekgzkVpBZIwhP/qocUuzjxfkbKXlQH8zO8bM2gNjgQV19nmXyIA1ZnYEkcSwNsCYRCQgn376\nKSeccAI9evSgvLycefPm4e5KDK1k0qRh9O17Q8qOF1jPwd33mdkE4DkgD7jf3Veb2RXR5+8Ffgs8\naGYriSSqX7n7R0HFJCLBOfjggwG49957ufzyy0OOJveMGjUYgBkzbmTPnjw6dqziueeafzyVzxCR\nlKisrKRDhw5hhyFxdLMfEWk17o5Zs75vpJWptpKIBM7d+dWvfkWbNm3405/+FHY4EjD1HESkSQUF\nBdxyS2TmywUXXMDjjz9O27aq+J/uWtJz0L+uiDTo0ksv5aGHHgJg8ODBFBcXa1whRyg5iEiCxx9/\nnLFjx8baO3bsoHv37iFGJK1NyUFEYtasWUP//v0BOPLII3nrrbeUFHKUBqRFhBdeeIHf/OY3nHHG\nGZx77rls3ryZLVu2KDHkMPUcRHLYypUrGThwIAAnnHAC69ato1OnTiFHJelAyUEkB7388sucfvrp\nsfaqVas48cQTQ4xI0o2Sg0gO2bx5M3369KFmaviSJUs4++yzQ45K0pHGHERywIcffkjPnj3p3bs3\n7s4TTzyBuysxSIOUHESy3OzZszn88MPZvn07f//733F3vve974UdlqQ5JQeRLLRnzx7OOOMMzIzP\nP/+c1157DXfnwgsvDDs0yRAacxDJIp9//jkXXHABzz77LAB/+MMfuOqqq0KOSjKRkoNIFqiuriYv\nLy/WnjJlCtOnT1f1VGk2JQeRDObuXHvttdxxxx0AdOnShV27dtGmTeqvGBcVlVJYWExlZVs6dNjH\npEnDYjeYkeyj5CCSoZ566qnYwPKYMWOYN29erd5DKhUVlTJ58nOUlU2PbSsri9ySUgkiO6lkt0iG\nmTVrFjt37mTWrFmcdNJJzJ8/P/BKqcOHT6W4+Df1bL+RRYumBfre0nwq2S0ZR5coDtzcuXMZN24c\nEBlofv/992nfvn2rvHdlZf1fFXv2BNNTkfApOUir0yWKA3PXXXdx9dVXA9CrVy9WrVrV6gXxOnTY\nV+/2jh2rWjUOaT0NjlqZ2adm9kkDj49bM0jJLoWFxbUSA0BZ2XRmzFgcUkTpadmyZZhZLDFs2bKF\nTZs2hVI
pddKkYfTte0OtbX37Xs/Eiee2eizSOhrsObh7l9YMRHKHLlE0bsWKFXzta1+LtV988cVa\nRfLCUNOjmzHjRvbsyaNjxyomThyhnl4WazA5mFmjv564+0epD0dygS5R1K+iooJbbrmFP/zhDwC8\n+eabfOUrXwk5qv1GjRqsZJBDGpsM/TrwWiMPkWbRJYraNm7ciJnRpUsX8vLy2LBhA+6eVolBco+m\nskooiopKmTFjcdwlinNz7rfSbdu2cfzxx/PRR5FO+OLFiznnnHNCjkqySUumsiaVHMzsUKA/0LFm\nm7uXNucNm0PJQbLJxx9/zCGHHBJrP/HEE6qSKoEIdJ2Dmf0MmAT0AVYApwMvAioEL0lJ1zUNrR3X\n7t27Ofvss3nppZcAmDhxIoWFhYG9n0hLJLPOYTJwKvCiu59lZl8Gbgs2LGlIun7RNiRd1zS0ZlxV\nVVVMmDCBWbNmAXDHHXdwzTXXpPQ9RFLO3Rt9AMujf74BdIz+/E5Tr4vuNwJ4F3gf+HUD++QT6ZG8\nBZQ0sI+L+8KF//S+fa938Nijb9/rfeHCf4YdWoOGDbuhVrw1j+HDp2Z9XFVVVV5YWOgXX3yxH3bY\nYT51arh/Z8k90e/OJr+r63sk03PYGB1zeBpYbGY7gXVNvcjM8oA/A+cAm4FXzWyBu6+O26cbcDcw\n3N03mVmPJOLJWQ0vHrsxbXsP6bqmIci43J1rrrmGu+66C4D333+fv/71ryqfLRmlyeTg7hdEfyww\nsxKgK7AoiWMPAta4+zoAM5sLjAZWx+1zMfCEu2+KvteHSUeeg9L1i7Yx6bqmIai4hg0bxuLFkZXe\nF110EX/7298Cq5QqEqQmi76b2elm1hXA3UuAEuDkJI7dC9gY194U3RavP9DdzJ43s+Vm9j+SCTpX\npesXbWPSdU1DquO6++67MTMWL15M+/btqaysDLSEtkjQkrmsNAv4Wly7IrqtqQSRzNzTdtFjDwU6\nAy+a2Uvu/n7dHQsKCmI/5+fnk5+fn8Ths8ukScMoK7uh1qWlyBfaiBCjaly6ll1IVVxz5sxh/Pjx\nABx55JGsXLmSww8/POXxiiSjpKSEkpKSlByryXUOZvaGuw+ss22Vu5/UxOtOBwrcfUS0PQWodvff\nxe3za6CTuxdE238BFrn7/DrH8qbizBVaPJYe1q5dy1VXXcWiRYs46qijeOONNzj00EPDDkuklkAX\nwZnZU8DzwD2AAVcCZ7n7d5t4XVvgPSK9gi3AK8C4OgPSXyYyaD0c6AC8DIx193fqHEvJIWSZNoU2\nKKWlpQwZMoRvf/vbnHnmmVx11VW1FrSJpJOgb/bzP4FCYGq0vQS4vKkXufs+M5sAPAfkAfe7+2oz\nuyL6/L3u/q6ZLQJWAdXA7LqJQcKXrmsVWtPrr7/OKaecEmvPnDmTY445JryARAKm2krSpFy+ReTS\npUsZOnRorP32228zYMCAECMSSV5Leg7JzFY6zsyWmNnb0fZJZja1qddJ9sjEKbQttWHDBswslhie\neuop3F2JQXJGk8kBmA1cD3webb8JjAssIkk7mTiFtrkqKiro06cPRx99NABLlizB3fnudxsdYhPJ\nOskkh87u/nJNI3p9Z29wIUm6Sde1Cqm0a9cu5s+fz3HHHce2bdtiPYWzz1Z9SclNyQxIbzezfjUN\nM7sQ+CC4kCTdpOtahVTYvXs3Q4YM4dVXXwUiN97p3bt3yFGJhC+Zqax9gfuAbwD/DfwH+GFNWYzW\noAFpSbXPPvuMgw46KNa+8847mTx5cogRiaRe4Df7ib5JFyLrHD4FLnL3ec15w+ZQcpBUqaqq4uCD\nD2b37t0AHH/88bzzjmZPS3YKZJ1DNBlcAfQlUk57FpHCedOBNUCrJQeRlnJ32rSpPcRWXV2tSqki\nDWhsQPoR4ERgJZFVzi8B1wAXu/v5rRCbSEpMmzatVmLYu3cv7q7EINKIB
i8rxddPit6b4QPgaHff\n3Yrx1cSiy0pywMaPH8+cOXMAGDp0KAsXLqRjx45NvEokewRVPiM2id3dq8xscxiJQeRA3Xrrrdx8\n882x9q5du+jatWuIEYlknsZ6DlXAZ3GbOgE1ycHdvdX+t6nnIMn4zne+Q1FREQBdunRh48aNdOvW\nLeSoRMITSPkMd89z94PjHm3jftavYZI2am60U5MYVq1axSeffKLEINICySyCE0lLy5YtY/Dg/Qvx\n3njjDb761a+GGJFI9kimfIZIWlm2bBlHHHFELDE88cQTuLsSg0gKqeeQozLx5j3r16+vdQ+F5cuX\n17rHgoikjpJDDsq0m/e8+eabnHTS/rvSPvjgg/z4xz8OLyCRHKDLSjmosLC4VmIAKCubzowZi0OK\nqH7l5eWYWSwx/PGPf8TdlRhEWoF6Djko3W/es2PHDnr06BFrn3baabz00kshRiSSe9RzyEHpevOe\nvXv3MmzYsFhiOPXUU3F3JQaRECg55KB0u3nPvn37aNu2Le3bt2fx4sVce+21uDuvvPJKKPGIyAGU\n7A6TVkinXlFRKTNmLI67ec+5rT4Y7e5ceOGFPPnkkwB8+ctf5p133lFBPJEUaZX7OYQpk5JDJk4R\nDUN8AvjhD3/II488klBSW0RaJqjCe3KAMm2KaBjGjBkT6ykAVFRU0Llz5xAjEpH66Fe1FMqUKaJh\nGD16NGYWSwy7du3C3ZUYRNKUkkMKpfsU0TDcdNNNmBkLFiwAYMOGDbi7SmiLpDldVkqhdJ0iGobH\nHnuMiy++ONZ+++23GTBgQIgRiciBUM8hhdJtimgYZs6ciZnFEsO//vUv3F2JQSTDaLZSiqXDFNEw\nzJ8/n+9///ux9ssvv8ygQYNCjEhE0nYqq5mNAO4E8oC/uPvvGtjvVOBF4CJ3f7Ke5zMmOeSahx56\niEsvvTTWnjJlCr/97W9DjEhEaqTlVFYzywP+DJwDbAZeNbMF7r66nv1+BywCtPopQ6xcuZKBAwfG\n2koKItklyAHpQcAad18HYGZzgdHA6jr7TQTmA6cGGIukyH/+8x+OPfbYWHvs2LHMnTs3xIhEJAhB\nJodewMa49ibgtPgdzKwXkYRxNpHkoGtHaWr79u307Nkz1r7wwgv5+9//HmJEIhKkIJNDMl/0dwLX\nubtbpJ5Cg5eVCgoKYj/n5+eTn5/f0vgkCR9++CGHH354rH3rrbdy4403hhiRiDSkpKSEkpKSlBwr\nsAFpMzsdKHD3EdH2FKA6flDazNayPyH0AD4DfubuC+ocSwPSrWzPnj106tSp1jb9G4hklrScrWRm\nbYH3gKHAFuAVYFzdAem4/R8E/qHZSuGqrq4mLy8vYZsqpYpknpYkh8AWwbn7PmAC8BzwDjDP3Veb\n2RVmdkVQ7yvNd95559VKDJWVlbi7EoNIDtIiOGH48OEUFxfH2p999lnCJSURyTxpuc5B0l+PHj3Y\nsWNHrP3RRx9x6KGHhhiRiKQL1VbKQV//+tcxs1hi2LZtG+6uxCAiMUoOOeQLX/gCZsZrr70GwPLl\ny3H3WlNVRURAySEnFBYWYmaUl5cDsGjRItydU045JeTIRCRdacwhi82bN48f/OAHsfbDDz/Mj370\noxAjEpFMoeSQhUpLSxkyZEis/cwzz3D++eeHGJGIZBolhwxRVFRKYWExlZVt6dBhH5MmDUu4T8SC\nBQsYPXp0rD137lzGjh3b2qGKSBZQcsgARUWlTJ78HGVl02Pbysoid5wbNWowb731FieeeGLsueHD\nh7No0aJWj1NEsocWwWWA4cOnUlz8m4Tt+flXU1JyV6x97LHHUlZW1pqhiUga0yK4LFdZWfef6ROg\nK/HFF3M5eYpI6uVkckjm+n066dBhX/SnKuL/yfLy2rNvX2UoMYlIdsu55NDU9ft0NGHCORQX1+4Z\nHnvsdRQWfjukiEQk2+XcmEND1++HD
7+RRYumpeQ9UsXdadOm9jrFwYNvolOnaiZOPDdtk5mIpAeN\nORyAxOv3EXv25NW7PSx1y2RXVFTQuXPnkKIRkVyTc+Uz9l+/r61jx6pWjqR+3/zmN2slhg8//BB3\nV2IQkVaVc8lh0qRh9O17Q61tfftez8SJ56b8vYqKShk+fCr5+QUMHz6VoqLSBvcdN24cZsaLL74I\nwNq1a3F3DjvssJTHJSLSlJy7rFRznX7GjBvZsyePjh2rmDhxRMqv3yc78H3PPffw85//PNbevn07\nPXr0SGksIiIHKucGpFtLUwPfV155JbNmzYptX7VqVa1VziIiLaUB6TTU0MD3mjWv1hpT+Nvf/sa4\nceNaKywRkaTk3JhDa0kc+H4cMMrKngPg97//Pe6uxCAiaUk9h4BMmjSMsrIbKCsbDwyIbe/f/zj+\n/e93wwtMRCQJ6jkEZODAvpSV/ZaaxHDUUYNYuPCfSgwikhHUc0ixDRs2cPTRR8faY8aMYf78+SFG\nJCJy4JQcUmT79u307Nkz1v7Od77DP/7xjxAjEhFpPiWHFtq1axfdunWrtS3Tpt2KiNSVtckh6LLc\nVVVVtG1b+/RVV1cn1EQSEclEWZkcgizLXV+l1L179yYkChGRTJaVs5UKC4trJQaAsrLpzJixuEXH\nvfrqq2slhj179uDuSgwiknUC/1YzsxHAnUAe8Bd3/12d538I/AowIve/vNLdV7XkPVNdljv+UlG3\nbt1Yt24dhxxySLOOJSKSCQJNDmaWB/wZOAfYDLxqZgvcfXXcbmuBwe6+K5pI7gNOb8n7pqosd93x\ng82bN/PFL36x2XGJiGSKoC8rDQLWuPs6d98LzAVGx+/g7i+6+65o82Wgd0vftKVluc2sVmJYunQp\n7q7EICI5I+jLSr2AjXHtTcBpjex/GfBsS9+0uWW5b7nlFgoKCmLtadOmMXXq1JaGIyKScYJODklP\n+Dezs4CfAGek4o1HjRqc9Myk++67jyuuuCLWnjlzJldeeWUqwhARyUhBJ4fNQJ+4dh8ivYdazOwk\nYDYwwt131neg+N/o8/Pzyc/Pb3Fwt99+O1OmTIm1r7vuOm677bYWH1dEJAwlJSWUlJSk5FiB3uzH\nzNoC7wFDgS3AK8C4+AFpMzsKWAqMd/eXGjhOSm/288gjj3DJJZfE2jNmzGDChAkpO76ISDpI25v9\nuPs+M5sAPEdkKuv97r7azK6IPn8vcBNwKHBPdBB4r7sPCiKehQsXct5558XaBx10EJ9++mkQbyUi\nktFy4jahZWVl9OvXr9a2YcNuSHlJDRGRdJK2PYewbd26lSOPPLLO1kiSKS5OXUkNEZFsk5XlMyoq\nKjCzWolh2LAb2D95qhSYSllZOy655G6KikrDCFNEJG1lVc+hurqavLy8hG1mRn5+QXRLKZEhkEjt\npR07YPLkhnsQQVd3FRFJR1mRHNydRYsWMXLkyNi2c8+dwuTJI2IrnfeX1CimJjHUiBTluzHhSz/I\n6q4iIuks4y8rDRgwgDZt2jBy5EiOOOK7wD7AWbz4t0ye/FzsktH+khrJF+ULqrqriEi6y9jkUFP/\naPXqyJKJoUN/RXn5U0RmzEbEf5GPGjWYu+4azmGHra7vcPUW5Ut1dVcRkUyRccmhblG8rVu34u7s\n29ep3v3jv8hHjRrMww9flXRRvlRVdxURyTQZM+bwwgsvcOaZZ8bapaWlfOtb34q193+RlxIZV2gL\n7OPjj7fWOs6BFOWbNGkYZWU31Lq0FEkkI1L11xIRSUsZkxxKSko4+OCDee+99+pZuxD5Il+16jK2\nbv0C8QPOH3zwC4qKSmt9+SdblK+51V1FRDJdVq2Q/trXfs6KFTMTth922FgefvgqfamLSE5pyQrp\njBtzaEzXrj3r3b5jx/G1Zi6JiEjjsio5NDSADFWUlU3XamgRkSRlVXKo7/agcD0QmYmkHoSISHIy\nZ
kA6GTVjCpdcMpYdO44HqoARQM1YQ1WDq6FFRGS/rBqQrlFf2YtID6I3sIVDDtnEaaf1Vp0kEclq\nKtldR/09iN5E7lo6nV27VLJbRKQxWTXmEG//aui9wDQidylVnSQRkWRkZc+hRvwitpde2sSuXYn7\nqE6SiEiirO051Bg1ajCLFk3jtNN61/u86iSJiCTK+uRQo75prg0V3BMRyXVZOVupIUVFpcyYsTiu\nTtK5GowWkazVktlKOZUcRERyiWoriYhISik5iIhIAiUHERFJoOQgIiIJlBxERCSBkoOIiCQINDmY\n2Qgze9fM3jezXzewT2H0+ZVmdnKQ8YiISHICSw5mlgf8mcgNFQYA48zs+Dr7jAT6uXt/4HLgnqDi\nkf1KSkrCDiFr6Fymls5n+giy5zAIWOPu69x9LzAXGF1nn/OBhwHc/WWgm5kdUd/BOnceTadOZ3HQ\nQd+lXbvzOOaYnzF8+FTd1a0Z9B8wdXQuU0vnM30EWZW1F7Axrr0JOC2JfXoD5XUPtnv3M8AVwHbg\nadavh/XrdU8GEZEgBNlzSLbeRd2l3Y287l6gfa0tuieDiEjqBVZbycxOBwrcfUS0PQWodvffxe0z\nCyhx97nR9rvAEHcvr3MsFVYSEWmGdLxN6HKgv5kdQ+Q2bGOBcXX2WQBMAOZGk8l/100M0Py/nIiI\nNE9gycHd95nZBOA5IA+4391Xm9kV0efvdfdnzWykma0BKoBLg4pHRESSlxElu0VEpHWl9QrpZBbR\nSePMbJ2ZrTKzFWb2SnRbdzNbbGb/NrNiM+sWdpzpysweMLNyM3szbluD58/MpkQ/r++a2bBwok5P\nDZzLAjPbFP18rjCzb8c9p3PZCDPrY2bPm9nbZvaWmU2Kbk/J5zNtk0Myi+gkKQ7ku/vJ7j4ouu06\nYLG7/xewJNqW+j1I5DMYr97zZ2YDiIytDYi+ZqaZpe3/sRDUdy4duCP6+TzZ3f8P6FwmaS9wjbuf\nAJwOXBX9jkzJ5zOdT3Yyi+gkOXUH9GOLD6N/frd1w8kc7r4M2Flnc0PnbzTwmLvvdfd1wBoin2Oh\nwXMJiZ9P0Llskrtvdfc3oj9/CqwmsnYsJZ/PdE4O9S2Q6xVSLJnMgf9rZsvN7GfRbUfEzQorB+pd\nlS4Nauj8fZHI57SGPrPJmRitrXZ/3CUQncsDEJ0VejLwMin6fKZzctBIeWqc4e4nA98m0u38VvyT\n0Ztz61w3UxLnT+e2cfcAXwIGAh8Af2xkX53LephZF+AJYLK7fxL/XEs+n+mcHDYDfeLafaid9SQJ\n7v5B9M/GN/87AAADbElEQVTtwFNEupHlZvYFADM7EtgWXoQZqaHzV/cz2zu6TRrg7ts8CvgL+y9z\n6FwmwczaEUkMj7r709HNKfl8pnNyiC2iM7P2RAZSFoQcU0Yxs85mdnD054OAYcCbRM7jJdHdLgGe\nrv8I0oCGzt8C4Adm1t7MvgT0B14JIb6MEf3yqnEBkc8n6Fw2ycwMuB94x93vjHsqJZ/PIFdIt0hD\ni+hCDivTHAE8FfkM0RaY4+7FZrYceNzMLgPWAReFF2J6M7PHgCFADzPbCNwE3E4958/d3zGzx4F3\ngH3Az10LiWLqOZc3A/lmNpDI5Y3/EKmuqXOZnDOA8cAqM1sR3TaFFH0+tQhOREQSpPNlJRERCYmS\ng4iIJFByEBGRBEoOIiKSQMlBREQSKDmIiEgCJQfJSWZ2Q7TM8cpoqehQirqZ2dVm1imM9xZpjNY5\nSM4xs28QqeEzxN33mll3oENNqZFWjCOPSGXMr7v7jtZ8b5GmqOcguegLwIfRUvC4+0fu/kH0xkjd\nAczs62b2fPTnAjN71Mz+X/QGKj+Nbs83s1IzWxi9eco90ZIGmNm46E2W3jSz22ve2Mw+NbP/bWZv\nANcTqZT5vJktad1TINI4JQfJRcVAHzN7z8zuNrPB0e2NdaO/Apw
FfAO4Ka4m0KnABCI3UOkLfM/M\nvkikhMFZRKqNnmpmNfci6Qy85O4D3X0asIXIzZiGpvDvJ9JiSg6Sc9y9AjgFuBzYDswzsx839hLg\nGXevjF7+eZ5I9VAHXonekKoaeAw4E/g6UOLuO9y9CpgD1CSgKiJVNEXSWtoW3hMJUvTL/J/AP6P3\nNP4xkWJkNb8wdWziENU1h4rbZtTf+4jfvkcF5CQTqOcgOcfM/svM+sdtOplI9cp1RH7rBxgT/xJg\ntJl1MLPDgHzg1ej2QdGy8m2IVL9cRqQM8hAzOyw66PwDIomoPp8AXVPx9xJJJfUcJBd1AWZEb0m5\nD3ifyCWmAcD9ZvYxUML+3/YdWEXkclIP4FZ332pmXyaSJP4M9AOWuvtTAGZ2XXR/Axa6+z/ijhXv\nPmCRmW3WuIOkE01lFWmCmd0MfOruf6yzPR/4X+5+XiiBiQRIl5VEklPfb1G6/7ZkLfUcREQkgXoO\nIiKSQMlBREQSKDmIiEgCJQcREUmg5CAiIgmUHEREJMH/B4CTTturU5FsAAAAAElFTkSuQmCC\n",
       "text": [
        "<matplotlib.figure.Figure at 0x192a2588>"
       ]
      }
     ],
     "prompt_number": 16
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "\n",
      "**Clasificaci\u00f3n Imagen**\n",
      "\n",
      "Probar distintos algoritmos y clasificadores aun en un espacio chico es muy importante para poder elegir cu\u00e1l nos da el mejor resultado, en nuestras pruebas existen diferencias de hasta 40% cuando se cambia el clasificador (Entre desicion tree y SVC).  \n",
      "\n",
      "Una ventaja clara en el uso de m\u00e9todos de extracci\u00f3n de atributos es la obvia reducci\u00f3n del dominio de an\u00e1lisis para los consecuentes m\u00e9todos de clasificaci\u00f3n de atributos, por lo que el tiempo de ejecuci\u00f3n total resultante disminuye considerablemente. Una ejecuci\u00f3n sin hacer uso de dichos m\u00e9todos de extracci\u00f3n de atributos nos demor\u00f3 unos 829s (13 min 49 seg) contra 37 seg.\n",
      "\n",
      "Por otro lado si analizamos la calidad de dichos clasificadores vemos que obtenemos diferentes comportamientos dependiendo del clasificador estudiado. Interesante es notar que para el mejor clasificador, SVC, la variaci\u00f3n no es significativa y se mantiene en aproximadamente 85%, denotando que los atributos extra\u00eddos por el extractor fueron los correctos, puesto que mejora la performance sin afectar el algoritmo de clasificaci\u00f3n.\n",
      "\n",
      "Sin embargo, en el caso del clasificador GaussianNB, pasa de tener una precisi\u00f3n de 18% a 65% y un f1-score de 26% a 52%. Manteni\u00e9ndose el resto de los indicadores relativamente constante para dicho clasificador. El clasificador DT si bien experimenta una mejora, no es substancial.\n",
      "\n",
      "A continuaci\u00f3n la tabla comparando el uso de extractor de atributos vs sin:\n",
      "\n",
      "<table>\n",
      "<tr><td></td><td></td><td>precision</td><td> recall</td><td>f1-score</td><td>accuracy</td></tr>\n",
      "<tr><td rowspan=3>KernelPCA </td><td>DT</td><td>0.45</td><td>0.45</td><td>0.44</td><td>0.45</td></tr>\n",
      "<tr>                             <td>SVC</td><td>0.85</td><td>0.85</td><td>0.85</td><td>0.85</td></tr>\n",
      "<tr>                             <td>Gaussian</td><td>0.18</td><td>0.43</td><td>0.26</td><td>0.43</td></tr>\n",
      "<tr><td rowspan=3>S/extractor<br/>de atributos</td><td>DT</td><td>0.47</td><td>0.51</td><td>0.47</td><td>0.51</td></tr>\n",
      "<tr>                                               <td>SVC</td><td>0.86</td><td>0.85</td><td>0.85</td><td>0.85</td></tr>\n",
      "<tr>                                               <td>Gaussian</td><td>0.65</td><td>0.48</td><td>0.52</td><td>0.48</td></tr>\n",
      "</table>\n",
      "\n"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "\n",
      "\n",
      "\n",
      "\n",
      "** Fuentes **\n",
      "+ [1] http://scikit-learn.org/0.12/auto_examples/applications/face_recognition.html\n",
      "+ [2] http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html\n",
      "\n",
      "+ [3] http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html\n",
      "+ [4] http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html#sklearn.naive_bayes.BernoulliNB\n",
      "+ [5] http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html\n",
      "+ [6] http://scikit-learn.org/stable/modules/generated/sklearn.grid_search.RandomizedSearchCV.html#sklearn.grid_search.RandomizedSearchCV\n",
      "\n"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": []
    }
   ],
   "metadata": {}
  }
 ]
}